author    Linus Torvalds <torvalds@linux-foundation.org>  2016-10-11 11:23:06 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-10-11 11:23:06 -0700
commit    f29135b54bcbfe1fea97d94e2ae860bade1d5a31 (patch)
tree      cd6cf1887c689b0fdd802bb8f16d3253adbc54c0
parent    4c609922a3ae0248597785d1f9adc8f142a80aef (diff)
parent    19c4d2f994788a954af1aa7e53b0fdb46fd7925a (diff)
Merge branch 'for-linus-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs updates from Chris Mason:
 "This is a big variety of fixes and cleanups.

  Liu Bo continues to fixup fuzzer related problems, and some of Josef's
  cleanups are prep for his bigger extent buffer changes (slated for v4.10)"

* 'for-linus-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (39 commits)
  Revert "btrfs: let btrfs_delete_unused_bgs() to clean relocated bgs"
  Btrfs: remove unnecessary btrfs_mark_buffer_dirty in split_leaf
  Btrfs: don't BUG() during drop snapshot
  btrfs: fix btrfs_no_printk stub helper
  Btrfs: memset to avoid stale content in btree leaf
  btrfs: parent_start initialization cleanup
  btrfs: Remove already completed TODO comment
  btrfs: Do not reassign count in btrfs_run_delayed_refs
  btrfs: fix a possible umount deadlock
  Btrfs: fix memory leak in do_walk_down
  btrfs: btrfs_debug should consume fs_info when DEBUG is not defined
  btrfs: convert send's verbose_printk to btrfs_debug
  btrfs: convert pr_* to btrfs_* where possible
  btrfs: convert printk(KERN_* to use pr_* calls
  btrfs: unsplit printed strings
  btrfs: clean the old superblocks before freeing the device
  Btrfs: kill BUG_ON in run_delayed_tree_ref
  Btrfs: don't leak reloc root nodes on error
  btrfs: squash lines for simple wrapper functions
  Btrfs: improve check_node to avoid reading corrupted nodes
  ...
-rw-r--r--  fs/btrfs/backref.c            | 409
-rw-r--r--  fs/btrfs/btrfs_inode.h        |  11
-rw-r--r--  fs/btrfs/check-integrity.c    | 342
-rw-r--r--  fs/btrfs/compression.c        |   6
-rw-r--r--  fs/btrfs/ctree.c              |  56
-rw-r--r--  fs/btrfs/ctree.h              | 116
-rw-r--r--  fs/btrfs/delayed-inode.c      |  25
-rw-r--r--  fs/btrfs/delayed-ref.c        |  15
-rw-r--r--  fs/btrfs/dev-replace.c        |  21
-rw-r--r--  fs/btrfs/dir-item.c           |   7
-rw-r--r--  fs/btrfs/disk-io.c            | 237
-rw-r--r--  fs/btrfs/disk-io.h            |   2
-rw-r--r--  fs/btrfs/extent-tree.c        | 198
-rw-r--r--  fs/btrfs/extent_io.c          | 170
-rw-r--r--  fs/btrfs/extent_io.h          |   4
-rw-r--r--  fs/btrfs/file.c               |  43
-rw-r--r--  fs/btrfs/free-space-cache.c   |  21
-rw-r--r--  fs/btrfs/free-space-cache.h   |   6
-rw-r--r--  fs/btrfs/free-space-tree.c    |  20
-rw-r--r--  fs/btrfs/inode-map.c          |  31
-rw-r--r--  fs/btrfs/inode.c              |  70
-rw-r--r--  fs/btrfs/ioctl.c              |  14
-rw-r--r--  fs/btrfs/lzo.c                |   6
-rw-r--r--  fs/btrfs/ordered-data.c       |   4
-rw-r--r--  fs/btrfs/print-tree.c         |  93
-rw-r--r--  fs/btrfs/qgroup.c             |  77
-rw-r--r--  fs/btrfs/raid56.c             |   5
-rw-r--r--  fs/btrfs/reada.c              |  32
-rw-r--r--  fs/btrfs/relocation.c         |  47
-rw-r--r--  fs/btrfs/root-tree.c          |  18
-rw-r--r--  fs/btrfs/scrub.c              |  58
-rw-r--r--  fs/btrfs/send.c               |  79
-rw-r--r--  fs/btrfs/super.c              |  62
-rw-r--r--  fs/btrfs/sysfs.c              |  19
-rw-r--r--  fs/btrfs/tests/inode-tests.c  |  12
-rw-r--r--  fs/btrfs/tests/qgroup-tests.c |   2
-rw-r--r--  fs/btrfs/transaction.c        |  49
-rw-r--r--  fs/btrfs/transaction.h        |   1
-rw-r--r--  fs/btrfs/tree-log.c           |  12
-rw-r--r--  fs/btrfs/uuid-tree.c          |  27
-rw-r--r--  fs/btrfs/volumes.c            | 197
-rw-r--r--  fs/btrfs/volumes.h            |   2
-rw-r--r--  fs/btrfs/zlib.c               |   8
43 files changed, 1563 insertions(+), 1071 deletions(-)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 455a6b2fd539..85dc7ab8f89e 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -17,6 +17,7 @@
*/
#include <linux/vmalloc.h>
+#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
@@ -34,6 +35,265 @@ struct extent_inode_elem {
struct extent_inode_elem *next;
};
+/*
+ * ref_root is used as the root of the ref tree that holds a collection
+ * of unique references.
+ */
+struct ref_root {
+ struct rb_root rb_root;
+
+ /*
+ * unique_refs is the number of ref_nodes with a positive count stored in
+ * the tree. Even if a ref_node with a count greater than one is added,
+ * unique_refs only increases by one.
+ */
+ unsigned int unique_refs;
+};
+
+/* ref_node is used to store a unique reference in the ref tree. */
+struct ref_node {
+ struct rb_node rb_node;
+
+ /* For NORMAL_REF, otherwise all these fields should be set to 0 */
+ u64 root_id;
+ u64 object_id;
+ u64 offset;
+
+ /* For SHARED_REF, otherwise parent field should be set to 0 */
+ u64 parent;
+
+ /* Ref to the ref_mod of btrfs_delayed_ref_node */
+ int ref_mod;
+};
+
+/* Dynamically allocate and initialize a ref_root */
+static struct ref_root *ref_root_alloc(void)
+{
+ struct ref_root *ref_tree;
+
+ ref_tree = kmalloc(sizeof(*ref_tree), GFP_NOFS);
+ if (!ref_tree)
+ return NULL;
+
+ ref_tree->rb_root = RB_ROOT;
+ ref_tree->unique_refs = 0;
+
+ return ref_tree;
+}
+
+/* Free all nodes in the ref tree, and reinit ref_root */
+static void ref_root_fini(struct ref_root *ref_tree)
+{
+ struct ref_node *node;
+ struct rb_node *next;
+
+ while ((next = rb_first(&ref_tree->rb_root)) != NULL) {
+ node = rb_entry(next, struct ref_node, rb_node);
+ rb_erase(next, &ref_tree->rb_root);
+ kfree(node);
+ }
+
+ ref_tree->rb_root = RB_ROOT;
+ ref_tree->unique_refs = 0;
+}
+
+static void ref_root_free(struct ref_root *ref_tree)
+{
+ if (!ref_tree)
+ return;
+
+ ref_root_fini(ref_tree);
+ kfree(ref_tree);
+}
+
+/*
+ * Compare ref_node with (root_id, object_id, offset, parent)
+ *
+ * The function compares two ref_nodes, a and b. It returns an integer less
+ * than, equal to, or greater than zero if a is found, respectively, to be
+ * less than, to equal, or to be greater than b.
+ */
+static int ref_node_cmp(struct ref_node *a, struct ref_node *b)
+{
+ if (a->root_id < b->root_id)
+ return -1;
+ else if (a->root_id > b->root_id)
+ return 1;
+
+ if (a->object_id < b->object_id)
+ return -1;
+ else if (a->object_id > b->object_id)
+ return 1;
+
+ if (a->offset < b->offset)
+ return -1;
+ else if (a->offset > b->offset)
+ return 1;
+
+ if (a->parent < b->parent)
+ return -1;
+ else if (a->parent > b->parent)
+ return 1;
+
+ return 0;
+}
+
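ref_node_cmp() above defines a plain lexicographic order over the tuple (root_id, object_id, offset, parent). A standalone userspace illustration of that ordering (the demo_ref struct, comparator, and sample values below merely restate the logic for demonstration; they are not part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct demo_ref { uint64_t root_id, object_id, offset, parent; };

/* Field-by-field ordering, same priority as ref_node_cmp() above. */
static int demo_ref_cmp(const void *pa, const void *pb)
{
	const struct demo_ref *a = pa, *b = pb;

	if (a->root_id != b->root_id)
		return a->root_id < b->root_id ? -1 : 1;
	if (a->object_id != b->object_id)
		return a->object_id < b->object_id ? -1 : 1;
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	if (a->parent != b->parent)
		return a->parent < b->parent ? -1 : 1;
	return 0;
}

int main(void)
{
	struct demo_ref refs[] = {
		{ 5, 259, 0, 0 },	/* same root, larger objectid */
		{ 5, 258, 4096, 0 },	/* same root/objectid, larger offset */
		{ 5, 258, 0, 0 },
		{ 0, 0, 0, 136708096 },	/* shared ref: only parent is set */
	};
	int i;

	qsort(refs, 4, sizeof(refs[0]), demo_ref_cmp);
	for (i = 0; i < 4; i++)
		printf("(%llu %llu %llu %llu)\n",
		       (unsigned long long)refs[i].root_id,
		       (unsigned long long)refs[i].object_id,
		       (unsigned long long)refs[i].offset,
		       (unsigned long long)refs[i].parent);
	return 0;
}

The shared ref sorts first because its root_id is zero; the keyed refs then sort by objectid and offset within the same root.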
+/*
+ * Search ref_node with (root_id, object_id, offset, parent) in the tree
+ *
+ * If found, a pointer to the ref_node is returned;
+ * if not found, NULL is returned, pos will point to the rb_node slot for the
+ * insert, and pos_parent will point to pos's parent for the insert.
+ */
+static struct ref_node *__ref_tree_search(struct ref_root *ref_tree,
+ struct rb_node ***pos,
+ struct rb_node **pos_parent,
+ u64 root_id, u64 object_id,
+ u64 offset, u64 parent)
+{
+ struct ref_node *cur = NULL;
+ struct ref_node entry;
+ int ret;
+
+ entry.root_id = root_id;
+ entry.object_id = object_id;
+ entry.offset = offset;
+ entry.parent = parent;
+
+ *pos = &ref_tree->rb_root.rb_node;
+
+ while (**pos) {
+ *pos_parent = **pos;
+ cur = rb_entry(*pos_parent, struct ref_node, rb_node);
+
+ ret = ref_node_cmp(cur, &entry);
+ if (ret > 0)
+ *pos = &(**pos)->rb_left;
+ else if (ret < 0)
+ *pos = &(**pos)->rb_right;
+ else
+ return cur;
+ }
+
+ return NULL;
+}
+
+/*
+ * Insert a ref_node into the ref tree
+ * @pos used to specify the position to insert at
+ * @pos_parent used to specify pos's parent
+ *
+ * On success, return 0;
+ * if the ref_node already exists, return -EEXIST;
+ */
+static int ref_tree_insert(struct ref_root *ref_tree, struct rb_node **pos,
+ struct rb_node *pos_parent, struct ref_node *ins)
+{
+ struct rb_node **p = NULL;
+ struct rb_node *parent = NULL;
+ struct ref_node *cur = NULL;
+
+ if (!pos) {
+ cur = __ref_tree_search(ref_tree, &p, &parent, ins->root_id,
+ ins->object_id, ins->offset,
+ ins->parent);
+ if (cur)
+ return -EEXIST;
+ } else {
+ p = pos;
+ parent = pos_parent;
+ }
+
+ rb_link_node(&ins->rb_node, parent, p);
+ rb_insert_color(&ins->rb_node, &ref_tree->rb_root);
+
+ return 0;
+}
+
+/* Erase and free ref_node, caller should update ref_root->unique_refs */
+static void ref_tree_remove(struct ref_root *ref_tree, struct ref_node *node)
+{
+ rb_erase(&node->rb_node, &ref_tree->rb_root);
+ kfree(node);
+}
+
+/*
+ * Update ref_root->unique_refs
+ *
+ * Call __ref_tree_search
+ * 1. if the ref_node doesn't exist, ref_tree_insert this node, and update
+ * ref_root->unique_refs:
+ * if ref_node->ref_mod > 0, ref_root->unique_refs++;
+ * if ref_node->ref_mod < 0, do nothing;
+ *
+ * 2. if the ref_node is found, get the original ref_node->ref_mod, and update
+ * ref_node->ref_mod;
+ * if the new ref_node->ref_mod is equal to 0, then call ref_tree_remove
+ *
+ * according to origin_count and new_count, update ref_root->unique_refs:
+ * +----------------+--------------+-------------+
+ * | |new_count <= 0|new_count > 0|
+ * +----------------+--------------+-------------+
+ * |origin_count < 0| 0 | 1 |
+ * +----------------+--------------+-------------+
+ * |origin_count > 0| -1 | 0 |
+ * +----------------+--------------+-------------+
+ *
+ * In case of allocation failure, -ENOMEM is returned and the ref_tree stays
+ * unaltered.
+ * Success, return 0
+ */
+static int ref_tree_add(struct ref_root *ref_tree, u64 root_id, u64 object_id,
+ u64 offset, u64 parent, int count)
+{
+ struct ref_node *node = NULL;
+ struct rb_node **pos = NULL;
+ struct rb_node *pos_parent = NULL;
+ int origin_count;
+ int ret;
+
+ if (!count)
+ return 0;
+
+ node = __ref_tree_search(ref_tree, &pos, &pos_parent, root_id,
+ object_id, offset, parent);
+ if (node == NULL) {
+ node = kmalloc(sizeof(*node), GFP_NOFS);
+ if (!node)
+ return -ENOMEM;
+
+ node->root_id = root_id;
+ node->object_id = object_id;
+ node->offset = offset;
+ node->parent = parent;
+ node->ref_mod = count;
+
+ ret = ref_tree_insert(ref_tree, pos, pos_parent, node);
+ ASSERT(!ret);
+ if (ret) {
+ kfree(node);
+ return ret;
+ }
+
+ ref_tree->unique_refs += node->ref_mod > 0 ? 1 : 0;
+
+ return 0;
+ }
+
+ origin_count = node->ref_mod;
+ node->ref_mod += count;
+
+ if (node->ref_mod > 0)
+ ref_tree->unique_refs += origin_count > 0 ? 0 : 1;
+ else if (node->ref_mod <= 0)
+ ref_tree->unique_refs += origin_count > 0 ? -1 : 0;
+
+ if (!node->ref_mod)
+ ref_tree_remove(ref_tree, node);
+
+ return 0;
+}
+
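The transition table in the ref_tree_add() comment above is the core of the unique_refs accounting. A minimal userspace sketch of just that delta rule (illustration only; unique_refs_delta is a made-up helper name and not part of the patch):

#include <assert.h>

/*
 * How ref_root->unique_refs should change when a node's ref_mod moves
 * from origin_count to new_count, mirroring the table in ref_tree_add().
 */
static int unique_refs_delta(int origin_count, int new_count)
{
	if (origin_count <= 0 && new_count > 0)
		return 1;	/* ref became visible: one more unique ref */
	if (origin_count > 0 && new_count <= 0)
		return -1;	/* ref dropped out: one less unique ref */
	return 0;		/* visibility unchanged */
}

int main(void)
{
	/* A shared data ref added with count 2, then fully dropped again. */
	assert(unique_refs_delta(0, 2) == 1);
	assert(unique_refs_delta(2, 0) == -1);
	/* A delayed ref whose count stays negative never becomes unique. */
	assert(unique_refs_delta(-1, -3) == 0);
	return 0;
}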
static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
struct btrfs_file_extent_item *fi,
u64 extent_item_pos,
@@ -390,8 +650,8 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
/* root node has been locked, we can release @subvol_srcu safely here */
srcu_read_unlock(&fs_info->subvol_srcu, index);
- pr_debug("search slot in root %llu (level %d, ref count %d) returned "
- "%d for key (%llu %u %llu)\n",
+ btrfs_debug(fs_info,
+ "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
ref->root_id, level, ref->count, ret,
ref->key_for_search.objectid, ref->key_for_search.type,
ref->key_for_search.offset);
@@ -700,6 +960,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 bytenr,
int *info_level, struct list_head *prefs,
+ struct ref_root *ref_tree,
u64 *total_refs, u64 inum)
{
int ret = 0;
@@ -767,6 +1028,13 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
count = btrfs_shared_data_ref_count(leaf, sdref);
ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
bytenr, count, GFP_NOFS);
+ if (ref_tree) {
+ if (!ret)
+ ret = ref_tree_add(ref_tree, 0, 0, 0,
+ bytenr, count);
+ if (!ret && ref_tree->unique_refs > 1)
+ ret = BACKREF_FOUND_SHARED;
+ }
break;
}
case BTRFS_TREE_BLOCK_REF_KEY:
@@ -794,6 +1062,15 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
root = btrfs_extent_data_ref_root(leaf, dref);
ret = __add_prelim_ref(prefs, root, &key, 0, 0,
bytenr, count, GFP_NOFS);
+ if (ref_tree) {
+ if (!ret)
+ ret = ref_tree_add(ref_tree, root,
+ key.objectid,
+ key.offset, 0,
+ count);
+ if (!ret && ref_tree->unique_refs > 1)
+ ret = BACKREF_FOUND_SHARED;
+ }
break;
}
default:
@@ -812,7 +1089,8 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
*/
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 bytenr,
- int info_level, struct list_head *prefs, u64 inum)
+ int info_level, struct list_head *prefs,
+ struct ref_root *ref_tree, u64 inum)
{
struct btrfs_root *extent_root = fs_info->extent_root;
int ret;
@@ -855,6 +1133,13 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
count = btrfs_shared_data_ref_count(leaf, sdref);
ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
bytenr, count, GFP_NOFS);
+ if (ref_tree) {
+ if (!ret)
+ ret = ref_tree_add(ref_tree, 0, 0, 0,
+ bytenr, count);
+ if (!ret && ref_tree->unique_refs > 1)
+ ret = BACKREF_FOUND_SHARED;
+ }
break;
}
case BTRFS_TREE_BLOCK_REF_KEY:
@@ -883,6 +1168,15 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
root = btrfs_extent_data_ref_root(leaf, dref);
ret = __add_prelim_ref(prefs, root, &key, 0, 0,
bytenr, count, GFP_NOFS);
+ if (ref_tree) {
+ if (!ret)
+ ret = ref_tree_add(ref_tree, root,
+ key.objectid,
+ key.offset, 0,
+ count);
+ if (!ret && ref_tree->unique_refs > 1)
+ ret = BACKREF_FOUND_SHARED;
+ }
break;
}
default:
@@ -909,13 +1203,16 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
* commit root.
* The special case is for qgroup to search roots in commit_transaction().
*
+ * If check_shared is set to 1, BACKREF_FOUND_SHARED is returned immediately
+ * for any extent that has more than one ref item.
+ *
* FIXME some caching might speed things up
*/
static int find_parent_nodes(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
u64 time_seq, struct ulist *refs,
struct ulist *roots, const u64 *extent_item_pos,
- u64 root_objectid, u64 inum)
+ u64 root_objectid, u64 inum, int check_shared)
{
struct btrfs_key key;
struct btrfs_path *path;
@@ -927,6 +1224,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
struct list_head prefs;
struct __prelim_ref *ref;
struct extent_inode_elem *eie = NULL;
+ struct ref_root *ref_tree = NULL;
u64 total_refs = 0;
INIT_LIST_HEAD(&prefs);
@@ -958,6 +1256,18 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
again:
head = NULL;
+ if (check_shared) {
+ if (!ref_tree) {
+ ref_tree = ref_root_alloc();
+ if (!ref_tree) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ } else {
+ ref_root_fini(ref_tree);
+ }
+ }
+
ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
if (ret < 0)
goto out;
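Note the lifecycle in this hunk: the ref tree is allocated lazily on the first pass, only reset via ref_root_fini() when the lookup restarts at "again:", and freed once at "out:" (see the later hunk that adds ref_root_free() next to btrfs_free_path()). The same allocate-once / reset-on-retry / free-once shape in a self-contained sketch (plain C; the scratch_* names are placeholders, not the kernel API):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct scratch { unsigned int unique_refs; };

static struct scratch *scratch_alloc(void)
{
	return calloc(1, sizeof(struct scratch));
}

static void scratch_reset(struct scratch *s)
{
	memset(s, 0, sizeof(*s));
}

static void scratch_free(struct scratch *s)
{
	free(s);
}

/* One search pass; returns nonzero when the search must be restarted. */
static int one_pass(struct scratch *s, int pass)
{
	s->unique_refs++;
	return pass < 2;	/* pretend the first two passes raced and retry */
}

static int search_with_retries(void)
{
	struct scratch *s = NULL;
	int pass = 0;

again:
	if (!s) {
		s = scratch_alloc();	/* first pass: allocate lazily */
		if (!s)
			return -ENOMEM;
	} else {
		scratch_reset(s);	/* retry: reuse the allocation, just reset it */
	}

	if (one_pass(s, pass++))
		goto again;

	scratch_free(s);	/* single cleanup point, like "out:" in find_parent_nodes() */
	return 0;
}

int main(void)
{
	return search_with_retries() < 0;
}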
@@ -1002,6 +1312,36 @@ again:
} else {
spin_unlock(&delayed_refs->lock);
}
+
+ if (check_shared && !list_empty(&prefs_delayed)) {
+ /*
+ * Add all delayed refs to the ref_tree and check if there
+ * are multiple ref items added.
+ */
+ list_for_each_entry(ref, &prefs_delayed, list) {
+ if (ref->key_for_search.type) {
+ ret = ref_tree_add(ref_tree,
+ ref->root_id,
+ ref->key_for_search.objectid,
+ ref->key_for_search.offset,
+ 0, ref->count);
+ if (ret)
+ goto out;
+ } else {
+ ret = ref_tree_add(ref_tree, 0, 0, 0,
+ ref->parent, ref->count);
+ if (ret)
+ goto out;
+ }
+
+ }
+
+ if (ref_tree->unique_refs > 1) {
+ ret = BACKREF_FOUND_SHARED;
+ goto out;
+ }
+
+ }
}
if (path->slots[0]) {
@@ -1017,11 +1357,13 @@ again:
key.type == BTRFS_METADATA_ITEM_KEY)) {
ret = __add_inline_refs(fs_info, path, bytenr,
&info_level, &prefs,
- &total_refs, inum);
+ ref_tree, &total_refs,
+ inum);
if (ret)
goto out;
ret = __add_keyed_refs(fs_info, path, bytenr,
- info_level, &prefs, inum);
+ info_level, &prefs,
+ ref_tree, inum);
if (ret)
goto out;
}
@@ -1106,6 +1448,7 @@ again:
out:
btrfs_free_path(path);
+ ref_root_free(ref_tree);
while (!list_empty(&prefs)) {
ref = list_first_entry(&prefs, struct __prelim_ref, list);
list_del(&ref->list);
@@ -1159,8 +1502,8 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
if (!*leafs)
return -ENOMEM;
- ret = find_parent_nodes(trans, fs_info, bytenr,
- time_seq, *leafs, NULL, extent_item_pos, 0, 0);
+ ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
+ *leafs, NULL, extent_item_pos, 0, 0, 0);
if (ret < 0 && ret != -ENOENT) {
free_leaf_list(*leafs);
return ret;
@@ -1202,8 +1545,8 @@ static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
ULIST_ITER_INIT(&uiter);
while (1) {
- ret = find_parent_nodes(trans, fs_info, bytenr,
- time_seq, tmp, *roots, NULL, 0, 0);
+ ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
+ tmp, *roots, NULL, 0, 0, 0);
if (ret < 0 && ret != -ENOENT) {
ulist_free(tmp);
ulist_free(*roots);
@@ -1273,7 +1616,7 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
ULIST_ITER_INIT(&uiter);
while (1) {
ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
- roots, NULL, root_objectid, inum);
+ roots, NULL, root_objectid, inum, 1);
if (ret == BACKREF_FOUND_SHARED) {
/* this is the only condition under which we return 1 */
ret = 1;
@@ -1492,7 +1835,8 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
if (found_key->objectid > logical ||
found_key->objectid + size <= logical) {
- pr_debug("logical %llu is not within any extent\n", logical);
+ btrfs_debug(fs_info,
+ "logical %llu is not within any extent", logical);
return -ENOENT;
}
@@ -1503,8 +1847,8 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
flags = btrfs_extent_flags(eb, ei);
- pr_debug("logical %llu is at position %llu within the extent (%llu "
- "EXTENT_ITEM %llu) flags %#llx size %u\n",
+ btrfs_debug(fs_info,
+ "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
logical, logical - found_key->objectid, found_key->objectid,
found_key->offset, flags, item_size);
@@ -1625,21 +1969,24 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
return 0;
}
-static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
- u64 root, u64 extent_item_objectid,
- iterate_extent_inodes_t *iterate, void *ctx)
+static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
+ struct extent_inode_elem *inode_list,
+ u64 root, u64 extent_item_objectid,
+ iterate_extent_inodes_t *iterate, void *ctx)
{
struct extent_inode_elem *eie;
int ret = 0;
for (eie = inode_list; eie; eie = eie->next) {
- pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), "
- "root %llu\n", extent_item_objectid,
- eie->inum, eie->offset, root);
+ btrfs_debug(fs_info,
+ "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
+ extent_item_objectid, eie->inum,
+ eie->offset, root);
ret = iterate(eie->inum, eie->offset, root, ctx);
if (ret) {
- pr_debug("stopping iteration for %llu due to ret=%d\n",
- extent_item_objectid, ret);
+ btrfs_debug(fs_info,
+ "stopping iteration for %llu due to ret=%d",
+ extent_item_objectid, ret);
break;
}
}
@@ -1667,7 +2014,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
struct ulist_iterator ref_uiter;
struct ulist_iterator root_uiter;
- pr_debug("resolving all inodes for extent %llu\n",
+ btrfs_debug(fs_info, "resolving all inodes for extent %llu",
extent_item_objectid);
if (!search_commit_root) {
@@ -1693,10 +2040,12 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
break;
ULIST_ITER_INIT(&root_uiter);
while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
- pr_debug("root %llu references leaf %llu, data list "
- "%#llx\n", root_node->val, ref_node->val,
- ref_node->aux);
- ret = iterate_leaf_refs((struct extent_inode_elem *)
+ btrfs_debug(fs_info,
+ "root %llu references leaf %llu, data list %#llx",
+ root_node->val, ref_node->val,
+ ref_node->aux);
+ ret = iterate_leaf_refs(fs_info,
+ (struct extent_inode_elem *)
(uintptr_t)ref_node->aux,
root_node->val,
extent_item_objectid,
@@ -1792,9 +2141,9 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
name_len = btrfs_inode_ref_name_len(eb, iref);
/* path must be released before calling iterate()! */
- pr_debug("following ref at offset %u for inode %llu in "
- "tree %llu\n", cur, found_key.objectid,
- fs_root->objectid);
+ btrfs_debug(fs_root->fs_info,
+ "following ref at offset %u for inode %llu in tree %llu",
+ cur, found_key.objectid, fs_root->objectid);
ret = iterate(parent, name_len,
(unsigned long)(iref + 1), eb, ctx);
if (ret)
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 4919aedb5fc1..1a8fa46ff87e 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -44,17 +44,6 @@
#define BTRFS_INODE_IN_DELALLOC_LIST 9
#define BTRFS_INODE_READDIO_NEED_LOCK 10
#define BTRFS_INODE_HAS_PROPS 11
-/*
- * The following 3 bits are meant only for the btree inode.
- * When any of them is set, it means an error happened while writing an
- * extent buffer belonging to:
- * 1) a non-log btree
- * 2) a log btree and first log sub-transaction
- * 3) a log btree and second log sub-transaction
- */
-#define BTRFS_INODE_BTREE_ERR 12
-#define BTRFS_INODE_BTREE_LOG1_ERR 13
-#define BTRFS_INODE_BTREE_LOG2_ERR 14
/* in memory btrfs inode */
struct btrfs_inode {
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 66789471b49d..8e99251650b3 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -656,7 +656,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
BUG_ON(NULL == state);
selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
if (NULL == selected_super) {
- printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+ pr_info("btrfsic: error, kmalloc failed!\n");
return -ENOMEM;
}
@@ -681,7 +681,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
}
if (NULL == state->latest_superblock) {
- printk(KERN_INFO "btrfsic: no superblock found!\n");
+ pr_info("btrfsic: no superblock found!\n");
kfree(selected_super);
return -1;
}
@@ -698,13 +698,13 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
next_bytenr = btrfs_super_root(selected_super);
if (state->print_mask &
BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- printk(KERN_INFO "root@%llu\n", next_bytenr);
+ pr_info("root@%llu\n", next_bytenr);
break;
case 1:
next_bytenr = btrfs_super_chunk_root(selected_super);
if (state->print_mask &
BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- printk(KERN_INFO "chunk@%llu\n", next_bytenr);
+ pr_info("chunk@%llu\n", next_bytenr);
break;
case 2:
next_bytenr = btrfs_super_log_root(selected_super);
@@ -712,7 +712,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
continue;
if (state->print_mask &
BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- printk(KERN_INFO "log@%llu\n", next_bytenr);
+ pr_info("log@%llu\n", next_bytenr);
break;
}
@@ -720,7 +720,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
btrfs_num_copies(state->root->fs_info,
next_bytenr, state->metablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+ pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, num_copies);
for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
@@ -733,9 +733,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
&tmp_next_block_ctx,
mirror_num);
if (ret) {
- printk(KERN_INFO "btrfsic:"
- " btrfsic_map_block(root @%llu,"
- " mirror %d) failed!\n",
+ pr_info("btrfsic: btrfsic_map_block(root @%llu, mirror %d) failed!\n",
next_bytenr, mirror_num);
kfree(selected_super);
return -1;
@@ -758,8 +756,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
ret = btrfsic_read_block(state, &tmp_next_block_ctx);
if (ret < (int)PAGE_SIZE) {
- printk(KERN_INFO
- "btrfsic: read @logical %llu failed!\n",
+ pr_info("btrfsic: read @logical %llu failed!\n",
tmp_next_block_ctx.start);
btrfsic_release_block_ctx(&tmp_next_block_ctx);
kfree(selected_super);
@@ -820,7 +817,7 @@ static int btrfsic_process_superblock_dev_mirror(
if (NULL == superblock_tmp) {
superblock_tmp = btrfsic_block_alloc();
if (NULL == superblock_tmp) {
- printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+ pr_info("btrfsic: error, kmalloc failed!\n");
brelse(bh);
return -1;
}
@@ -894,7 +891,7 @@ static int btrfsic_process_superblock_dev_mirror(
btrfs_num_copies(state->root->fs_info,
next_bytenr, state->metablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+ pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, num_copies);
for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
struct btrfsic_block *next_block;
@@ -905,8 +902,7 @@ static int btrfsic_process_superblock_dev_mirror(
state->metablock_size,
&tmp_next_block_ctx,
mirror_num)) {
- printk(KERN_INFO "btrfsic: btrfsic_map_block("
- "bytenr @%llu, mirror %d) failed!\n",
+ pr_info("btrfsic: btrfsic_map_block(bytenr @%llu, mirror %d) failed!\n",
next_bytenr, mirror_num);
brelse(bh);
return -1;
@@ -948,7 +944,7 @@ static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void)
sf = kzalloc(sizeof(*sf), GFP_NOFS);
if (NULL == sf)
- printk(KERN_INFO "btrfsic: alloc memory failed!\n");
+ pr_info("btrfsic: alloc memory failed!\n");
else
sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER;
return sf;
@@ -994,9 +990,7 @@ continue_with_new_stack_frame:
sf->nr = btrfs_stack_header_nritems(&leafhdr->header);
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "leaf %llu items %d generation %llu"
- " owner %llu\n",
+ pr_info("leaf %llu items %d generation %llu owner %llu\n",
sf->block_ctx->start, sf->nr,
btrfs_stack_header_generation(
&leafhdr->header),
@@ -1023,8 +1017,7 @@ continue_with_current_leaf_stack_frame:
if (disk_item_offset + sizeof(struct btrfs_item) >
sf->block_ctx->len) {
leaf_item_out_of_bounce_error:
- printk(KERN_INFO
- "btrfsic: leaf item out of bounce at logical %llu, dev %s\n",
+ pr_info("btrfsic: leaf item out of bounce at logical %llu, dev %s\n",
sf->block_ctx->start,
sf->block_ctx->dev->name);
goto one_stack_frame_backwards;
@@ -1120,8 +1113,7 @@ leaf_item_out_of_bounce_error:
sf->nr = btrfs_stack_header_nritems(&nodehdr->header);
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO "node %llu level %d items %d"
- " generation %llu owner %llu\n",
+ pr_info("node %llu level %d items %d generation %llu owner %llu\n",
sf->block_ctx->start,
nodehdr->header.level, sf->nr,
btrfs_stack_header_generation(
@@ -1145,8 +1137,7 @@ continue_with_current_node_stack_frame:
(uintptr_t)nodehdr;
if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
sf->block_ctx->len) {
- printk(KERN_INFO
- "btrfsic: node item out of bounce at logical %llu, dev %s\n",
+ pr_info("btrfsic: node item out of bounce at logical %llu, dev %s\n",
sf->block_ctx->start,
sf->block_ctx->dev->name);
goto one_stack_frame_backwards;
@@ -1275,7 +1266,7 @@ static int btrfsic_create_link_to_next_block(
btrfs_num_copies(state->root->fs_info,
next_bytenr, state->metablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+ pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, *num_copiesp);
*mirror_nump = 1;
}
@@ -1284,15 +1275,13 @@ static int btrfsic_create_link_to_next_block(
return 0;
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
+ pr_info("btrfsic_create_link_to_next_block(mirror_num=%d)\n",
*mirror_nump);
ret = btrfsic_map_block(state, next_bytenr,
state->metablock_size,
next_block_ctx, *mirror_nump);
if (ret) {
- printk(KERN_INFO
- "btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
+ pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
next_bytenr, *mirror_nump);
btrfsic_release_block_ctx(next_block_ctx);
*next_blockp = NULL;
@@ -1318,16 +1307,14 @@ static int btrfsic_create_link_to_next_block(
if (next_block->logical_bytenr != next_bytenr &&
!(!next_block->is_metadata &&
0 == next_block->logical_bytenr))
- printk(KERN_INFO
- "Referenced block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
+ pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
next_bytenr, next_block_ctx->dev->name,
next_block_ctx->dev_bytenr, *mirror_nump,
btrfsic_get_block_type(state,
next_block),
next_block->logical_bytenr);
else
- printk(KERN_INFO
- "Referenced block @%llu (%s/%llu/%d) found in hash table, %c.\n",
+ pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, %c.\n",
next_bytenr, next_block_ctx->dev->name,
next_block_ctx->dev_bytenr, *mirror_nump,
btrfsic_get_block_type(state,
@@ -1348,7 +1335,7 @@ static int btrfsic_create_link_to_next_block(
if (NULL == l) {
l = btrfsic_block_link_alloc();
if (NULL == l) {
- printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+ pr_info("btrfsic: error, kmalloc failed!\n");
btrfsic_release_block_ctx(next_block_ctx);
*next_blockp = NULL;
return -1;
@@ -1381,8 +1368,7 @@ static int btrfsic_create_link_to_next_block(
if (limit_nesting > 0 && did_alloc_block_link) {
ret = btrfsic_read_block(state, next_block_ctx);
if (ret < (int)next_block_ctx->len) {
- printk(KERN_INFO
- "btrfsic: read block @logical %llu failed!\n",
+ pr_info("btrfsic: read block @logical %llu failed!\n",
next_bytenr);
btrfsic_release_block_ctx(next_block_ctx);
*next_blockp = NULL;
@@ -1417,8 +1403,7 @@ static int btrfsic_handle_extent_data(
if (file_extent_item_offset +
offsetof(struct btrfs_file_extent_item, disk_num_bytes) >
block_ctx->len) {
- printk(KERN_INFO
- "btrfsic: file item out of bounce at logical %llu, dev %s\n",
+ pr_info("btrfsic: file item out of bounce at logical %llu, dev %s\n",
block_ctx->start, block_ctx->dev->name);
return -1;
}
@@ -1429,7 +1414,7 @@ static int btrfsic_handle_extent_data(
if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
btrfs_stack_file_extent_disk_bytenr(&file_extent_item) == 0) {
if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
- printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu\n",
+ pr_info("extent_data: type %u, disk_bytenr = %llu\n",
file_extent_item.type,
btrfs_stack_file_extent_disk_bytenr(
&file_extent_item));
@@ -1438,8 +1423,7 @@ static int btrfsic_handle_extent_data(
if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
block_ctx->len) {
- printk(KERN_INFO
- "btrfsic: file item out of bounce at logical %llu, dev %s\n",
+ pr_info("btrfsic: file item out of bounce at logical %llu, dev %s\n",
block_ctx->start, block_ctx->dev->name);
return -1;
}
@@ -1457,8 +1441,7 @@ static int btrfsic_handle_extent_data(
generation = btrfs_stack_file_extent_generation(&file_extent_item);
if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
- printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu,"
- " offset = %llu, num_bytes = %llu\n",
+ pr_info("extent_data: type %u, disk_bytenr = %llu, offset = %llu, num_bytes = %llu\n",
file_extent_item.type,
btrfs_stack_file_extent_disk_bytenr(&file_extent_item),
btrfs_stack_file_extent_offset(&file_extent_item),
@@ -1477,7 +1460,7 @@ static int btrfsic_handle_extent_data(
btrfs_num_copies(state->root->fs_info,
next_bytenr, state->datablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+ pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, num_copies);
for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
struct btrfsic_block_data_ctx next_block_ctx;
@@ -1485,19 +1468,16 @@ static int btrfsic_handle_extent_data(
int block_was_created;
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO "btrfsic_handle_extent_data("
- "mirror_num=%d)\n", mirror_num);
+ pr_info("btrfsic_handle_extent_data(mirror_num=%d)\n",
+ mirror_num);
if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
- printk(KERN_INFO
- "\tdisk_bytenr = %llu, num_bytes %u\n",
+ pr_info("\tdisk_bytenr = %llu, num_bytes %u\n",
next_bytenr, chunk_len);
ret = btrfsic_map_block(state, next_bytenr,
chunk_len, &next_block_ctx,
mirror_num);
if (ret) {
- printk(KERN_INFO
- "btrfsic: btrfsic_map_block(@%llu,"
- " mirror=%d) failed!\n",
+ pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
next_bytenr, mirror_num);
return -1;
}
@@ -1512,8 +1492,7 @@ static int btrfsic_handle_extent_data(
mirror_num,
&block_was_created);
if (NULL == next_block) {
- printk(KERN_INFO
- "btrfsic: error, kmalloc failed!\n");
+ pr_info("btrfsic: error, kmalloc failed!\n");
btrfsic_release_block_ctx(&next_block_ctx);
return -1;
}
@@ -1523,12 +1502,7 @@ static int btrfsic_handle_extent_data(
next_block->logical_bytenr != next_bytenr &&
!(!next_block->is_metadata &&
0 == next_block->logical_bytenr)) {
- printk(KERN_INFO
- "Referenced block"
- " @%llu (%s/%llu/%d)"
- " found in hash table, D,"
- " bytenr mismatch"
- " (!= stored %llu).\n",
+ pr_info("Referenced block @%llu (%s/%llu/%d) found in hash table, D, bytenr mismatch (!= stored %llu).\n",
next_bytenr,
next_block_ctx.dev->name,
next_block_ctx.dev_bytenr,
@@ -1592,7 +1566,7 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
kfree(multi);
if (NULL == block_ctx_out->dev) {
ret = -ENXIO;
- printk(KERN_INFO "btrfsic: error, cannot lookup dev (#1)!\n");
+ pr_info("btrfsic: error, cannot lookup dev (#1)!\n");
}
return ret;
@@ -1638,8 +1612,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
BUG_ON(block_ctx->pagev);
BUG_ON(block_ctx->mem_to_free);
if (block_ctx->dev_bytenr & ((u64)PAGE_SIZE - 1)) {
- printk(KERN_INFO
- "btrfsic: read_block() with unaligned bytenr %llu\n",
+ pr_info("btrfsic: read_block() with unaligned bytenr %llu\n",
block_ctx->dev_bytenr);
return -1;
}
@@ -1666,8 +1639,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
if (!bio) {
- printk(KERN_INFO
- "btrfsic: bio_alloc() for %u pages failed!\n",
+ pr_info("btrfsic: bio_alloc() for %u pages failed!\n",
num_pages - i);
return -1;
}
@@ -1682,13 +1654,11 @@ static int btrfsic_read_block(struct btrfsic_state *state,
break;
}
if (j == i) {
- printk(KERN_INFO
- "btrfsic: error, failed to add a single page!\n");
+ pr_info("btrfsic: error, failed to add a single page!\n");
return -1;
}
if (submit_bio_wait(bio)) {
- printk(KERN_INFO
- "btrfsic: read error at logical %llu dev %s!\n",
+ pr_info("btrfsic: read error at logical %llu dev %s!\n",
block_ctx->start, block_ctx->dev->name);
bio_put(bio);
return -1;
@@ -1700,7 +1670,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
for (i = 0; i < num_pages; i++) {
block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
if (!block_ctx->datav[i]) {
- printk(KERN_INFO "btrfsic: kmap() failed (dev %s)!\n",
+ pr_info("btrfsic: kmap() failed (dev %s)!\n",
block_ctx->dev->name);
return -1;
}
@@ -1715,19 +1685,17 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
BUG_ON(NULL == state);
- printk(KERN_INFO "all_blocks_list:\n");
+ pr_info("all_blocks_list:\n");
list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) {
const struct btrfsic_block_link *l;
- printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
+ pr_info("%c-block @%llu (%s/%llu/%d)\n",
btrfsic_get_block_type(state, b_all),
b_all->logical_bytenr, b_all->dev_state->name,
b_all->dev_bytenr, b_all->mirror_num);
list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) {
- printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
- " refers %u* to"
- " %c @%llu (%s/%llu/%d)\n",
+ pr_info(" %c @%llu (%s/%llu/%d) refers %u* to %c @%llu (%s/%llu/%d)\n",
btrfsic_get_block_type(state, b_all),
b_all->logical_bytenr, b_all->dev_state->name,
b_all->dev_bytenr, b_all->mirror_num,
@@ -1740,9 +1708,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
}
list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) {
- printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
- " is ref %u* from"
- " %c @%llu (%s/%llu/%d)\n",
+ pr_info(" %c @%llu (%s/%llu/%d) is ref %u* from %c @%llu (%s/%llu/%d)\n",
btrfsic_get_block_type(state, b_all),
b_all->logical_bytenr, b_all->dev_state->name,
b_all->dev_bytenr, b_all->mirror_num,
@@ -1754,7 +1720,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
l->block_ref_from->mirror_num);
}
- printk(KERN_INFO "\n");
+ pr_info("\n");
}
}
@@ -1829,8 +1795,7 @@ again:
mapped_datav[0]);
if (num_pages * PAGE_SIZE <
BTRFS_SUPER_INFO_SIZE) {
- printk(KERN_INFO
- "btrfsic: cannot work with too short bios!\n");
+ pr_info("btrfsic: cannot work with too short bios!\n");
return;
}
is_metadata = 1;
@@ -1838,8 +1803,7 @@ again:
processed_len = BTRFS_SUPER_INFO_SIZE;
if (state->print_mask &
BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
- printk(KERN_INFO
- "[before new superblock is written]:\n");
+ pr_info("[before new superblock is written]:\n");
btrfsic_dump_tree_sub(state, block, 0);
}
}
@@ -1847,8 +1811,7 @@ again:
if (!block->is_superblock) {
if (num_pages * PAGE_SIZE <
state->metablock_size) {
- printk(KERN_INFO
- "btrfsic: cannot work with too short bios!\n");
+ pr_info("btrfsic: cannot work with too short bios!\n");
return;
}
processed_len = state->metablock_size;
@@ -1863,8 +1826,7 @@ again:
if (block->logical_bytenr != bytenr &&
!(!block->is_metadata &&
block->logical_bytenr == 0))
- printk(KERN_INFO
- "Written block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
+ pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
bytenr, dev_state->name,
dev_bytenr,
block->mirror_num,
@@ -1872,8 +1834,7 @@ again:
block),
block->logical_bytenr);
else
- printk(KERN_INFO
- "Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
+ pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
bytenr, dev_state->name,
dev_bytenr, block->mirror_num,
btrfsic_get_block_type(state,
@@ -1883,33 +1844,24 @@ again:
} else {
if (num_pages * PAGE_SIZE <
state->datablock_size) {
- printk(KERN_INFO
- "btrfsic: cannot work with too short bios!\n");
+ pr_info("btrfsic: cannot work with too short bios!\n");
return;
}
processed_len = state->datablock_size;
bytenr = block->logical_bytenr;
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "Written block @%llu (%s/%llu/%d)"
- " found in hash table, %c.\n",
+ pr_info("Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
bytenr, dev_state->name, dev_bytenr,
block->mirror_num,
btrfsic_get_block_type(state, block));
}
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "ref_to_list: %cE, ref_from_list: %cE\n",
+ pr_info("ref_to_list: %cE, ref_from_list: %cE\n",
list_empty(&block->ref_to_list) ? ' ' : '!',
list_empty(&block->ref_from_list) ? ' ' : '!');
if (btrfsic_is_block_ref_by_superblock(state, block, 0)) {
- printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
- " @%llu (%s/%llu/%d), old(gen=%llu,"
- " objectid=%llu, type=%d, offset=%llu),"
- " new(gen=%llu),"
- " which is referenced by most recent superblock"
- " (superblockgen=%llu)!\n",
+ pr_info("btrfs: attempt to overwrite %c-block @%llu (%s/%llu/%d), old(gen=%llu, objectid=%llu, type=%d, offset=%llu), new(gen=%llu), which is referenced by most recent superblock (superblockgen=%llu)!\n",
btrfsic_get_block_type(state, block), bytenr,
dev_state->name, dev_bytenr, block->mirror_num,
block->generation,
@@ -1923,9 +1875,7 @@ again:
}
if (!block->is_iodone && !block->never_written) {
- printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
- " @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu,"
- " which is not yet iodone!\n",
+ pr_info("btrfs: attempt to overwrite %c-block @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu, which is not yet iodone!\n",
btrfsic_get_block_type(state, block), bytenr,
dev_state->name, dev_bytenr, block->mirror_num,
block->generation,
@@ -2023,8 +1973,7 @@ again:
mapped_datav[0]);
if (state->print_mask &
BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
- printk(KERN_INFO
- "[after new superblock is written]:\n");
+ pr_info("[after new superblock is written]:\n");
btrfsic_dump_tree_sub(state, block, 0);
}
} else {
@@ -2036,9 +1985,7 @@ again:
0, 0);
}
if (ret)
- printk(KERN_INFO
- "btrfsic: btrfsic_process_metablock"
- "(root @%llu) failed!\n",
+ pr_info("btrfsic: btrfsic_process_metablock(root @%llu) failed!\n",
dev_bytenr);
} else {
block->is_metadata = 0;
@@ -2065,8 +2012,7 @@ again:
if (!is_metadata) {
processed_len = state->datablock_size;
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO "Written block (%s/%llu/?)"
- " !found in hash table, D.\n",
+ pr_info("Written block (%s/%llu/?) !found in hash table, D.\n",
dev_state->name, dev_bytenr);
if (!state->include_extent_data) {
/* ignore that written D block */
@@ -2084,9 +2030,7 @@ again:
btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
dev_bytenr);
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "Written block @%llu (%s/%llu/?)"
- " !found in hash table, M.\n",
+ pr_info("Written block @%llu (%s/%llu/?) !found in hash table, M.\n",
bytenr, dev_state->name, dev_bytenr);
}
@@ -2100,7 +2044,7 @@ again:
block = btrfsic_block_alloc();
if (NULL == block) {
- printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+ pr_info("btrfsic: error, kmalloc failed!\n");
btrfsic_release_block_ctx(&block_ctx);
goto continue_loop;
}
@@ -2150,8 +2094,7 @@ again:
block->next_in_same_bio = NULL;
}
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "New written %c-block @%llu (%s/%llu/%d)\n",
+ pr_info("New written %c-block @%llu (%s/%llu/%d)\n",
is_metadata ? 'M' : 'D',
block->logical_bytenr, block->dev_state->name,
block->dev_bytenr, block->mirror_num);
@@ -2162,9 +2105,7 @@ again:
ret = btrfsic_process_metablock(state, block,
&block_ctx, 0, 0);
if (ret)
- printk(KERN_INFO
- "btrfsic: process_metablock(root @%llu)"
- " failed!\n",
+ pr_info("btrfsic: process_metablock(root @%llu) failed!\n",
dev_bytenr);
}
btrfsic_release_block_ctx(&block_ctx);
@@ -2199,8 +2140,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
if ((dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
- printk(KERN_INFO
- "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
+ pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
bp->bi_error,
btrfsic_get_block_type(dev_state->state, block),
block->logical_bytenr, dev_state->name,
@@ -2211,8 +2151,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
dev_state->last_flush_gen++;
if ((dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
- printk(KERN_INFO
- "bio_end_io() new %s flush_gen=%llu\n",
+ pr_info("bio_end_io() new %s flush_gen=%llu\n",
dev_state->name,
dev_state->last_flush_gen);
}
@@ -2235,8 +2174,7 @@ static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
BUG_ON(NULL == block);
dev_state = block->dev_state;
if ((dev_state->state->print_mask & BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
- printk(KERN_INFO
- "bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n",
+ pr_info("bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n",
iodone_w_error,
btrfsic_get_block_type(dev_state->state, block),
block->logical_bytenr, block->dev_state->name,
@@ -2247,8 +2185,7 @@ static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
dev_state->last_flush_gen++;
if ((dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
- printk(KERN_INFO
- "bh_end_io() new %s flush_gen=%llu\n",
+ pr_info("bh_end_io() new %s flush_gen=%llu\n",
dev_state->name, dev_state->last_flush_gen);
}
if (block->submit_bio_bh_rw & REQ_FUA)
@@ -2271,9 +2208,7 @@ static int btrfsic_process_written_superblock(
if (!(superblock->generation > state->max_superblock_generation ||
0 == state->max_superblock_generation)) {
if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
- printk(KERN_INFO
- "btrfsic: superblock @%llu (%s/%llu/%d)"
- " with old gen %llu <= %llu\n",
+ pr_info("btrfsic: superblock @%llu (%s/%llu/%d) with old gen %llu <= %llu\n",
superblock->logical_bytenr,
superblock->dev_state->name,
superblock->dev_bytenr, superblock->mirror_num,
@@ -2281,9 +2216,7 @@ static int btrfsic_process_written_superblock(
state->max_superblock_generation);
} else {
if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
- printk(KERN_INFO
- "btrfsic: got new superblock @%llu (%s/%llu/%d)"
- " with new gen %llu > %llu\n",
+ pr_info("btrfsic: got new superblock @%llu (%s/%llu/%d) with new gen %llu > %llu\n",
superblock->logical_bytenr,
superblock->dev_state->name,
superblock->dev_bytenr, superblock->mirror_num,
@@ -2318,7 +2251,7 @@ static int btrfsic_process_written_superblock(
next_bytenr = btrfs_super_root(super_hdr);
if (state->print_mask &
BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- printk(KERN_INFO "root@%llu\n", next_bytenr);
+ pr_info("root@%llu\n", next_bytenr);
break;
case 1:
btrfs_set_disk_key_objectid(&tmp_disk_key,
@@ -2327,7 +2260,7 @@ static int btrfsic_process_written_superblock(
next_bytenr = btrfs_super_chunk_root(super_hdr);
if (state->print_mask &
BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- printk(KERN_INFO "chunk@%llu\n", next_bytenr);
+ pr_info("chunk@%llu\n", next_bytenr);
break;
case 2:
btrfs_set_disk_key_objectid(&tmp_disk_key,
@@ -2338,7 +2271,7 @@ static int btrfsic_process_written_superblock(
continue;
if (state->print_mask &
BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- printk(KERN_INFO "log@%llu\n", next_bytenr);
+ pr_info("log@%llu\n", next_bytenr);
break;
}
@@ -2346,23 +2279,19 @@ static int btrfsic_process_written_superblock(
btrfs_num_copies(state->root->fs_info,
next_bytenr, BTRFS_SUPER_INFO_SIZE);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+ pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, num_copies);
for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
int was_created;
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "btrfsic_process_written_superblock("
- "mirror_num=%d)\n", mirror_num);
+ pr_info("btrfsic_process_written_superblock(mirror_num=%d)\n", mirror_num);
ret = btrfsic_map_block(state, next_bytenr,
BTRFS_SUPER_INFO_SIZE,
&tmp_next_block_ctx,
mirror_num);
if (ret) {
- printk(KERN_INFO
- "btrfsic: btrfsic_map_block(@%llu,"
- " mirror=%d) failed!\n",
+ pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
next_bytenr, mirror_num);
return -1;
}
@@ -2375,8 +2304,7 @@ static int btrfsic_process_written_superblock(
mirror_num,
&was_created);
if (NULL == next_block) {
- printk(KERN_INFO
- "btrfsic: error, kmalloc failed!\n");
+ pr_info("btrfsic: error, kmalloc failed!\n");
btrfsic_release_block_ctx(&tmp_next_block_ctx);
return -1;
}
@@ -2425,8 +2353,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
* by the most recent super block.
*/
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "btrfsic: abort cyclic linkage (case 1).\n");
+ pr_info("btrfsic: abort cyclic linkage (case 1).\n");
return ret;
}
@@ -2437,9 +2364,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
*/
list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "rl=%d, %c @%llu (%s/%llu/%d)"
- " %u* refers to %c @%llu (%s/%llu/%d)\n",
+ pr_info("rl=%d, %c @%llu (%s/%llu/%d) %u* refers to %c @%llu (%s/%llu/%d)\n",
recursion_level,
btrfsic_get_block_type(state, block),
block->logical_bytenr, block->dev_state->name,
@@ -2451,9 +2376,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
l->block_ref_to->dev_bytenr,
l->block_ref_to->mirror_num);
if (l->block_ref_to->never_written) {
- printk(KERN_INFO "btrfs: attempt to write superblock"
- " which references block %c @%llu (%s/%llu/%d)"
- " which is never written!\n",
+ pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is never written!\n",
btrfsic_get_block_type(state, l->block_ref_to),
l->block_ref_to->logical_bytenr,
l->block_ref_to->dev_state->name,
@@ -2461,9 +2384,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
l->block_ref_to->mirror_num);
ret = -1;
} else if (!l->block_ref_to->is_iodone) {
- printk(KERN_INFO "btrfs: attempt to write superblock"
- " which references block %c @%llu (%s/%llu/%d)"
- " which is not yet iodone!\n",
+ pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is not yet iodone!\n",
btrfsic_get_block_type(state, l->block_ref_to),
l->block_ref_to->logical_bytenr,
l->block_ref_to->dev_state->name,
@@ -2471,9 +2392,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
l->block_ref_to->mirror_num);
ret = -1;
} else if (l->block_ref_to->iodone_w_error) {
- printk(KERN_INFO "btrfs: attempt to write superblock"
- " which references block %c @%llu (%s/%llu/%d)"
- " which has write error!\n",
+ pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which has write error!\n",
btrfsic_get_block_type(state, l->block_ref_to),
l->block_ref_to->logical_bytenr,
l->block_ref_to->dev_state->name,
@@ -2486,10 +2405,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
l->parent_generation &&
BTRFSIC_GENERATION_UNKNOWN !=
l->block_ref_to->generation) {
- printk(KERN_INFO "btrfs: attempt to write superblock"
- " which references block %c @%llu (%s/%llu/%d)"
- " with generation %llu !="
- " parent generation %llu!\n",
+ pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) with generation %llu != parent generation %llu!\n",
btrfsic_get_block_type(state, l->block_ref_to),
l->block_ref_to->logical_bytenr,
l->block_ref_to->dev_state->name,
@@ -2500,11 +2416,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
ret = -1;
} else if (l->block_ref_to->flush_gen >
l->block_ref_to->dev_state->last_flush_gen) {
- printk(KERN_INFO "btrfs: attempt to write superblock"
- " which references block %c @%llu (%s/%llu/%d)"
- " which is not flushed out of disk's write cache"
- " (block flush_gen=%llu,"
- " dev->flush_gen=%llu)!\n",
+ pr_info("btrfs: attempt to write superblock which references block %c @%llu (%s/%llu/%d) which is not flushed out of disk's write cache (block flush_gen=%llu, dev->flush_gen=%llu)!\n",
btrfsic_get_block_type(state, l->block_ref_to),
l->block_ref_to->logical_bytenr,
l->block_ref_to->dev_state->name,
@@ -2533,8 +2445,7 @@ static int btrfsic_is_block_ref_by_superblock(
if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
/* refer to comment at "abort cyclic linkage (case 1)" */
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "btrfsic: abort cyclic linkage (case 2).\n");
+ pr_info("btrfsic: abort cyclic linkage (case 2).\n");
return 0;
}
@@ -2545,9 +2456,7 @@ static int btrfsic_is_block_ref_by_superblock(
*/
list_for_each_entry(l, &block->ref_from_list, node_ref_from) {
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "rl=%d, %c @%llu (%s/%llu/%d)"
- " is ref %u* from %c @%llu (%s/%llu/%d)\n",
+ pr_info("rl=%d, %c @%llu (%s/%llu/%d) is ref %u* from %c @%llu (%s/%llu/%d)\n",
recursion_level,
btrfsic_get_block_type(state, block),
block->logical_bytenr, block->dev_state->name,
@@ -2577,9 +2486,7 @@ static int btrfsic_is_block_ref_by_superblock(
static void btrfsic_print_add_link(const struct btrfsic_state *state,
const struct btrfsic_block_link *l)
{
- printk(KERN_INFO
- "Add %u* link from %c @%llu (%s/%llu/%d)"
- " to %c @%llu (%s/%llu/%d).\n",
+ pr_info("Add %u* link from %c @%llu (%s/%llu/%d) to %c @%llu (%s/%llu/%d).\n",
l->ref_cnt,
btrfsic_get_block_type(state, l->block_ref_from),
l->block_ref_from->logical_bytenr,
@@ -2594,9 +2501,7 @@ static void btrfsic_print_add_link(const struct btrfsic_state *state,
static void btrfsic_print_rem_link(const struct btrfsic_state *state,
const struct btrfsic_block_link *l)
{
- printk(KERN_INFO
- "Rem %u* link from %c @%llu (%s/%llu/%d)"
- " to %c @%llu (%s/%llu/%d).\n",
+ pr_info("Rem %u* link from %c @%llu (%s/%llu/%d) to %c @%llu (%s/%llu/%d).\n",
l->ref_cnt,
btrfsic_get_block_type(state, l->block_ref_from),
l->block_ref_from->logical_bytenr,
@@ -2708,8 +2613,7 @@ static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
if (NULL == l) {
l = btrfsic_block_link_alloc();
if (NULL == l) {
- printk(KERN_INFO
- "btrfsic: error, kmalloc" " failed!\n");
+ pr_info("btrfsic: error, kmalloc failed!\n");
return NULL;
}
@@ -2756,13 +2660,12 @@ static struct btrfsic_block *btrfsic_block_lookup_or_add(
block = btrfsic_block_alloc();
if (NULL == block) {
- printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+ pr_info("btrfsic: error, kmalloc failed!\n");
return NULL;
}
dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev);
if (NULL == dev_state) {
- printk(KERN_INFO
- "btrfsic: error, lookup dev_state failed!\n");
+ pr_info("btrfsic: error, lookup dev_state failed!\n");
btrfsic_block_free(block);
return NULL;
}
@@ -2774,8 +2677,7 @@ static struct btrfsic_block *btrfsic_block_lookup_or_add(
block->never_written = never_written;
block->mirror_num = mirror_num;
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- printk(KERN_INFO
- "New %s%c-block @%llu (%s/%llu/%d)\n",
+ pr_info("New %s%c-block @%llu (%s/%llu/%d)\n",
additional_string,
btrfsic_get_block_type(state, block),
block->logical_bytenr, dev_state->name,
@@ -2810,9 +2712,7 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
ret = btrfsic_map_block(state, bytenr, state->metablock_size,
&block_ctx, mirror_num);
if (ret) {
- printk(KERN_INFO "btrfsic:"
- " btrfsic_map_block(logical @%llu,"
- " mirror %d) failed!\n",
+ pr_info("btrfsic: btrfsic_map_block(logical @%llu, mirror %d) failed!\n",
bytenr, mirror_num);
continue;
}
@@ -2827,9 +2727,7 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
}
if (WARN_ON(!match)) {
- printk(KERN_INFO "btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio,"
- " buffer->log_bytenr=%llu, submit_bio(bdev=%s,"
- " phys_bytenr=%llu)!\n",
+ pr_info("btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio, buffer->log_bytenr=%llu, submit_bio(bdev=%s, phys_bytenr=%llu)!\n",
bytenr, dev_state->name, dev_bytenr);
for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
ret = btrfsic_map_block(state, bytenr,
@@ -2838,8 +2736,7 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
if (ret)
continue;
- printk(KERN_INFO "Read logical bytenr @%llu maps to"
- " (%s/%llu/%d)\n",
+ pr_info("Read logical bytenr @%llu maps to (%s/%llu/%d)\n",
bytenr, block_ctx.dev->name,
block_ctx.dev_bytenr, mirror_num);
}
@@ -2849,11 +2746,8 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
struct block_device *bdev)
{
- struct btrfsic_dev_state *ds;
-
- ds = btrfsic_dev_state_hashtable_lookup(bdev,
- &btrfsic_dev_state_hashtable);
- return ds;
+ return btrfsic_dev_state_hashtable_lookup(bdev,
+ &btrfsic_dev_state_hashtable);
}
int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh)
@@ -2876,9 +2770,7 @@ int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh)
dev_bytenr = 4096 * bh->b_blocknr;
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
- printk(KERN_INFO
- "submit_bh(op=0x%x,0x%x, blocknr=%llu "
- "(bytenr %llu), size=%zu, data=%p, bdev=%p)\n",
+ pr_info("submit_bh(op=0x%x,0x%x, blocknr=%llu (bytenr %llu), size=%zu, data=%p, bdev=%p)\n",
op, op_flags, (unsigned long long)bh->b_blocknr,
dev_bytenr, bh->b_size, bh->b_data, bh->b_bdev);
btrfsic_process_written_block(dev_state, dev_bytenr,
@@ -2887,17 +2779,13 @@ int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh)
} else if (NULL != dev_state && (op_flags & REQ_PREFLUSH)) {
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
- printk(KERN_INFO
- "submit_bh(op=0x%x,0x%x FLUSH, bdev=%p)\n",
+ pr_info("submit_bh(op=0x%x,0x%x FLUSH, bdev=%p)\n",
op, op_flags, bh->b_bdev);
if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
if ((dev_state->state->print_mask &
(BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
BTRFSIC_PRINT_MASK_VERBOSE)))
- printk(KERN_INFO
- "btrfsic_submit_bh(%s) with FLUSH"
- " but dummy block already in use"
- " (ignored)!\n",
+ pr_info("btrfsic_submit_bh(%s) with FLUSH but dummy block already in use (ignored)!\n",
dev_state->name);
} else {
struct btrfsic_block *const block =
@@ -2942,9 +2830,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
bio_is_patched = 0;
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
- printk(KERN_INFO
- "submit_bio(rw=%d,0x%x, bi_vcnt=%u,"
- " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
+ pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
bio_op(bio), bio->bi_opf, bio->bi_vcnt,
(unsigned long long)bio->bi_iter.bi_sector,
dev_bytenr, bio->bi_bdev);
@@ -2967,8 +2853,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
}
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
- printk(KERN_INFO
- "#%u: bytenr=%llu, len=%u, offset=%u\n",
+ pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
i, cur_bytenr, bio->bi_io_vec[i].bv_len,
bio->bi_io_vec[i].bv_offset);
cur_bytenr += bio->bi_io_vec[i].bv_len;
@@ -2985,17 +2870,13 @@ static void __btrfsic_submit_bio(struct bio *bio)
} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
- printk(KERN_INFO
- "submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
+ pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
bio_op(bio), bio->bi_opf, bio->bi_bdev);
if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
if ((dev_state->state->print_mask &
(BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
BTRFSIC_PRINT_MASK_VERBOSE)))
- printk(KERN_INFO
- "btrfsic_submit_bio(%s) with FLUSH"
- " but dummy block already in use"
- " (ignored)!\n",
+ pr_info("btrfsic_submit_bio(%s) with FLUSH but dummy block already in use (ignored)!\n",
dev_state->name);
} else {
struct btrfsic_block *const block =
@@ -3039,14 +2920,12 @@ int btrfsic_mount(struct btrfs_root *root,
struct btrfs_device *device;
if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
- printk(KERN_INFO
- "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
+ pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
root->nodesize, PAGE_SIZE);
return -1;
}
if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
- printk(KERN_INFO
- "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
+ pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
root->sectorsize, PAGE_SIZE);
return -1;
}
@@ -3054,7 +2933,7 @@ int btrfsic_mount(struct btrfs_root *root,
if (!state) {
state = vzalloc(sizeof(*state));
if (!state) {
- printk(KERN_INFO "btrfs check-integrity: vzalloc() failed!\n");
+ pr_info("btrfs check-integrity: vzalloc() failed!\n");
return -1;
}
}
@@ -3086,8 +2965,7 @@ int btrfsic_mount(struct btrfs_root *root,
ds = btrfsic_dev_state_alloc();
if (NULL == ds) {
- printk(KERN_INFO
- "btrfs check-integrity: kmalloc() failed!\n");
+ pr_info("btrfs check-integrity: kmalloc() failed!\n");
mutex_unlock(&btrfsic_mutex);
return -1;
}
@@ -3148,9 +3026,7 @@ void btrfsic_unmount(struct btrfs_root *root,
}
if (NULL == state) {
- printk(KERN_INFO
- "btrfsic: error, cannot find state information"
- " on umount!\n");
+ pr_info("btrfsic: error, cannot find state information on umount!\n");
mutex_unlock(&btrfsic_mutex);
return;
}
@@ -3177,9 +3053,7 @@ void btrfsic_unmount(struct btrfs_root *root,
if (b_all->is_iodone || b_all->never_written)
btrfsic_block_free(b_all);
else
- printk(KERN_INFO "btrfs: attempt to free %c-block"
- " @%llu (%s/%llu/%d) on umount which is"
- " not yet iodone!\n",
+ pr_info("btrfs: attempt to free %c-block @%llu (%s/%llu/%d) on umount which is not yet iodone!\n",
btrfsic_get_block_type(state, b_all),
b_all->logical_bytenr, b_all->dev_state->name,
b_all->dev_bytenr, b_all->mirror_num);
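
The check-integrity.c hunks above all apply the same two-step conversion: the explicit KERN_INFO level passed to printk() is replaced by the matching pr_info() helper, and format strings that had been split across source lines are joined so the whole message can be grepped for in the tree. A minimal sketch of the pattern, with made-up function and variable names (this is illustrative, not btrfs code):

	#include <linux/printk.h>
	#include <linux/types.h>

	static void example_report(const char *dev_name, u64 dev_bytenr)
	{
		/* old style: explicit KERN_INFO, format string split in two */
		printk(KERN_INFO "btrfsic: mismatch on %s "
		       "(bytenr %llu)!\n", dev_name, dev_bytenr);

		/* new style: pr_info() implies KERN_INFO and the string stays
		 * unsplit, so the full message is searchable in the source */
		pr_info("btrfsic: mismatch on %s (bytenr %llu)!\n",
			dev_name, dev_bytenr);
	}
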
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 029db6e1105c..ccc70d96958d 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -783,8 +783,7 @@ void __init btrfs_init_compress(void)
*/
workspace = btrfs_compress_op[i]->alloc_workspace();
if (IS_ERR(workspace)) {
- printk(KERN_WARNING
- "BTRFS: cannot preallocate compression workspace, will try later");
+ pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
} else {
atomic_set(&btrfs_comp_ws[i].total_ws, 1);
btrfs_comp_ws[i].free_ws = 1;
@@ -854,8 +853,7 @@ again:
/* no burst */ 1);
if (__ratelimit(&_rs)) {
- printk(KERN_WARNING
- "no compression workspaces, low memory, retrying");
+ pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
}
}
goto again;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d1c56c94dd5a..f6ba165d3f81 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -45,9 +45,7 @@ static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
struct btrfs_path *btrfs_alloc_path(void)
{
- struct btrfs_path *path;
- path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
- return path;
+ return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}
/*
@@ -1102,7 +1100,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
int level, ret;
int last_ref = 0;
int unlock_orig = 0;
- u64 parent_start;
+ u64 parent_start = 0;
if (*cow_ret == buf)
unlock_orig = 1;
@@ -1121,13 +1119,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
else
btrfs_node_key(buf, &disk_key, 0);
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
- if (parent)
- parent_start = parent->start;
- else
- parent_start = 0;
- } else
- parent_start = 0;
+ if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
+ parent_start = parent->start;
cow = btrfs_alloc_tree_block(trans, root, parent_start,
root->root_key.objectid, &disk_key, level,
@@ -1170,8 +1163,6 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
parent_start = buf->start;
- else
- parent_start = 0;
extent_buffer_get(cow);
tree_mod_log_set_root_pointer(root, cow, 1);
@@ -1182,11 +1173,6 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
free_extent_buffer(buf);
add_root_to_dirty_list(root);
} else {
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
- parent_start = parent->start;
- else
- parent_start = 0;
-
WARN_ON(trans->transid != btrfs_header_generation(parent));
tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
MOD_LOG_KEY_REPLACE, GFP_NOFS);
@@ -1729,20 +1715,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
return err;
}
-/*
- * The leaf data grows from end-to-front in the node.
- * this returns the address of the start of the last item,
- * which is the stop of the leaf data stack
- */
-static inline unsigned int leaf_data_end(struct btrfs_root *root,
- struct extent_buffer *leaf)
-{
- u32 nr = btrfs_header_nritems(leaf);
- if (nr == 0)
- return BTRFS_LEAF_DATA_SIZE(root);
- return btrfs_item_offset_nr(leaf, nr - 1);
-}
-
/*
* search for key in the extent_buffer. The items start at offset p,
@@ -2268,7 +2240,6 @@ static void reada_for_search(struct btrfs_root *root,
u64 search;
u64 target;
u64 nread = 0;
- u64 gen;
struct extent_buffer *eb;
u32 nr;
u32 blocksize;
@@ -2313,7 +2284,6 @@ static void reada_for_search(struct btrfs_root *root,
search = btrfs_node_blockptr(node, nr);
if ((search <= target && target - search <= 65536) ||
(search > target && search - target <= 65536)) {
- gen = btrfs_node_ptr_generation(node, nr);
readahead_tree_block(root, search);
nread += blocksize;
}
@@ -4341,7 +4311,11 @@ again:
if (path->slots[1] == 0)
fixup_low_keys(fs_info, path, &disk_key, 1);
}
- btrfs_mark_buffer_dirty(right);
+ /*
+ * We create a new leaf 'right' for the required ins_len and
+ * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
+ * the content of ins_len to 'right'.
+ */
return ret;
}
@@ -4772,8 +4746,9 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
if (btrfs_leaf_free_space(root, leaf) < total_size) {
btrfs_print_leaf(root, leaf);
- btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
- total_size, btrfs_leaf_free_space(root, leaf));
+ btrfs_crit(root->fs_info,
+ "not enough freespace need %u have %d",
+ total_size, btrfs_leaf_free_space(root, leaf));
BUG();
}
@@ -4782,8 +4757,9 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
if (old_data < data_end) {
btrfs_print_leaf(root, leaf);
- btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
- slot, old_data, data_end);
+ btrfs_crit(root->fs_info,
+ "slot %d old_data %d data_end %d",
+ slot, old_data, data_end);
BUG_ON(1);
}
/*
@@ -4793,7 +4769,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
for (i = slot; i < nritems; i++) {
u32 ioff;
- item = btrfs_item_nr( i);
+ item = btrfs_item_nr(i);
ioff = btrfs_token_item_offset(leaf, item, &token);
btrfs_set_token_item_offset(leaf, item,
ioff - total_data, &token);
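
The first __btrfs_cow_block() hunk above replaces a three-way assignment of parent_start with a zero initializer plus a single conditional, and the later hunks drop the now-redundant "else parent_start = 0" arms. Restated as plain before/after fragments (a simplified sketch of that first hunk, not a new change):

	/* before: parent_start computed branch by branch */
	u64 parent_start;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else {
		parent_start = 0;
	}

	/* after: default to 0 at declaration, override in the one case that matters */
	u64 parent_start = 0;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID && parent)
		parent_start = parent->start;
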
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index e62fd50237e4..6c21bad26a27 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -37,6 +37,7 @@
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/sizes.h>
+#include <linux/dynamic_debug.h>
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
@@ -676,9 +677,25 @@ struct btrfs_device;
struct btrfs_fs_devices;
struct btrfs_balance_control;
struct btrfs_delayed_root;
+
+#define BTRFS_FS_BARRIER 1
+#define BTRFS_FS_CLOSING_START 2
+#define BTRFS_FS_CLOSING_DONE 3
+#define BTRFS_FS_LOG_RECOVERING 4
+#define BTRFS_FS_OPEN 5
+#define BTRFS_FS_QUOTA_ENABLED 6
+#define BTRFS_FS_QUOTA_ENABLING 7
+#define BTRFS_FS_QUOTA_DISABLING 8
+#define BTRFS_FS_UPDATE_UUID_TREE_GEN 9
+#define BTRFS_FS_CREATING_FREE_SPACE_TREE 10
+#define BTRFS_FS_BTREE_ERR 11
+#define BTRFS_FS_LOG1_ERR 12
+#define BTRFS_FS_LOG2_ERR 13
+
struct btrfs_fs_info {
u8 fsid[BTRFS_FSID_SIZE];
u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
+ unsigned long flags;
struct btrfs_root *extent_root;
struct btrfs_root *tree_root;
struct btrfs_root *chunk_root;
@@ -907,10 +924,6 @@ struct btrfs_fs_info {
int thread_pool_size;
struct kobject *space_info_kobj;
- int do_barriers;
- int closing;
- int log_root_recovering;
- int open;
u64 total_pinned;
@@ -987,17 +1000,6 @@ struct btrfs_fs_info {
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
u32 check_integrity_print_mask;
#endif
- /*
- * quota information
- */
- unsigned int quota_enabled:1;
-
- /*
- * quota_enabled only changes state after a commit. This holds the
- * next state.
- */
- unsigned int pending_quota_state:1;
-
/* is qgroup tracking in a consistent state? */
u64 qgroup_flags;
@@ -1061,7 +1063,6 @@ struct btrfs_fs_info {
wait_queue_head_t replace_wait;
struct semaphore uuid_tree_rescan_sem;
- unsigned int update_uuid_tree_gen:1;
/* Used to reclaim the metadata space in the background. */
struct work_struct async_reclaim_work;
@@ -1080,7 +1081,6 @@ struct btrfs_fs_info {
*/
struct list_head pinned_chunks;
- int creating_free_space_tree;
/* Used to record internally whether fs has been frozen */
int fs_frozen;
};
@@ -1435,13 +1435,13 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
#define cpu_to_le8(v) (v)
#define __le8 u8
-#define read_eb_member(eb, ptr, type, member, result) ( \
+#define read_eb_member(eb, ptr, type, member, result) (\
read_extent_buffer(eb, (char *)(result), \
((unsigned long)(ptr)) + \
offsetof(type, member), \
sizeof(((type *)0)->member)))
-#define write_eb_member(eb, ptr, type, member, result) ( \
+#define write_eb_member(eb, ptr, type, member, result) (\
write_extent_buffer(eb, (char *)(result), \
((unsigned long)(ptr)) + \
offsetof(type, member), \
@@ -2293,6 +2293,21 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
return offsetof(struct btrfs_leaf, items);
}
+/*
+ * The leaf data grows from end-to-front in the node.
+ * this returns the address of the start of the last item,
+ * which is the stop of the leaf data stack
+ */
+static inline unsigned int leaf_data_end(struct btrfs_root *root,
+ struct extent_buffer *leaf)
+{
+ u32 nr = btrfs_header_nritems(leaf);
+
+ if (nr == 0)
+ return BTRFS_LEAF_DATA_SIZE(root);
+ return btrfs_item_offset_nr(leaf, nr - 1);
+}
+
/* struct btrfs_file_extent_item */
BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8);
BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr,
@@ -2867,10 +2882,14 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
{
/*
- * Get synced with close_ctree()
+ * Do it this way so we only ever do one test_bit in the normal case.
*/
- smp_mb();
- return fs_info->closing;
+ if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
+ if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
+ return 2;
+ return 1;
+ }
+ return 0;
}
/*
@@ -3118,7 +3137,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
int nr);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
- struct extent_state **cached_state);
+ struct extent_state **cached_state, int dedupe);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root,
struct btrfs_root *parent_root,
@@ -3236,14 +3255,17 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
unsigned long new_flags);
int btrfs_sync_fs(struct super_block *sb, int wait);
+static inline __printf(2, 3)
+void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
+{
+}
+
#ifdef CONFIG_PRINTK
__printf(2, 3)
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
#else
-static inline __printf(2, 3)
-void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
-{
-}
+#define btrfs_printk(fs_info, fmt, args...) \
+ btrfs_no_printk(fs_info, fmt, ##args)
#endif
#define btrfs_emerg(fs_info, fmt, args...) \
@@ -3314,7 +3336,35 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
btrfs_printk_ratelimited(fs_info, KERN_NOTICE fmt, ##args)
#define btrfs_info_rl(fs_info, fmt, args...) \
btrfs_printk_ratelimited(fs_info, KERN_INFO fmt, ##args)
-#ifdef DEBUG
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define btrfs_debug(fs_info, fmt, args...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ btrfs_printk(fs_info, KERN_DEBUG fmt, ##args); \
+} while (0)
+#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args); \
+} while (0)
+#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, \
+ ##args);\
+} while (0)
+#define btrfs_debug_rl(fs_info, fmt, args...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, \
+ ##args); \
+} while (0)
+#elif defined(DEBUG)
#define btrfs_debug(fs_info, fmt, args...) \
btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
@@ -3325,13 +3375,13 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, ##args)
#else
#define btrfs_debug(fs_info, fmt, args...) \
- no_printk(KERN_DEBUG fmt, ##args)
+ btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
- no_printk(KERN_DEBUG fmt, ##args)
+ btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
- no_printk(KERN_DEBUG fmt, ##args)
+ btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl(fs_info, fmt, args...) \
- no_printk(KERN_DEBUG fmt, ##args)
+ btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#endif
#define btrfs_printk_in_rcu(fs_info, fmt, args...) \
@@ -3362,7 +3412,7 @@ do { \
__cold
static inline void assfail(char *expr, char *file, int line)
{
- pr_err("BTRFS: assertion failed: %s, file: %s, line: %d",
+ pr_err("assertion failed: %s, file: %s, line: %d\n",
expr, file, line);
BUG();
}
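
Two themes run through the ctree.h hunks above. First, the individual status members of struct btrfs_fs_info (do_barriers, closing, log_root_recovering, open, quota_enabled, pending_quota_state, update_uuid_tree_gen, creating_free_space_tree) are replaced by named bit numbers in a single "unsigned long flags" word driven with the standard atomic bitops, so the rewritten btrfs_fs_closing() needs only one test_bit() in the common case instead of an int read guarded by smp_mb(). Second, btrfs_no_printk() is now always defined, which lets the !DEBUG btrfs_debug() variants expand to a call that consumes fs_info and the format arguments, silencing unused-variable warnings while still doing nothing. A minimal sketch of how the new flag bits are exercised, following the call patterns that appear later in this diff (the wrapper function itself is illustrative only):

	#include <linux/bitops.h>
	#include <linux/printk.h>
	#include "ctree.h"	/* btrfs-internal header defining struct btrfs_fs_info */

	static void example_fs_flag_usage(struct btrfs_fs_info *fs_info)
	{
		/* open_ctree(): mark the filesystem as fully open */
		set_bit(BTRFS_FS_OPEN, &fs_info->flags);

		/* readers test a bit instead of a plain int behind smp_mb() */
		if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
			pr_info("qgroup accounting is active\n");

		/* close_ctree(): the bit comes back off on the way down */
		clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
	}
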
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 3eeb9cd8cfa5..0fcf5f25d524 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -385,11 +385,8 @@ static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
struct btrfs_delayed_node *delayed_node,
struct btrfs_key *key)
{
- struct btrfs_delayed_item *item;
-
- item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
+ return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
NULL, NULL);
- return item;
}
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
@@ -1481,11 +1478,10 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_lock(&delayed_node->mutex);
ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
if (unlikely(ret)) {
- btrfs_err(root->fs_info, "err add delayed dir index item(name: %.*s) "
- "into the insertion tree of the delayed node"
- "(root id: %llu, inode id: %llu, errno: %d)",
- name_len, name, delayed_node->root->objectid,
- delayed_node->inode_id, ret);
+ btrfs_err(root->fs_info,
+ "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
+ name_len, name, delayed_node->root->objectid,
+ delayed_node->inode_id, ret);
BUG();
}
mutex_unlock(&delayed_node->mutex);
@@ -1553,11 +1549,9 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_lock(&node->mutex);
ret = __btrfs_add_delayed_deletion_item(node, item);
if (unlikely(ret)) {
- btrfs_err(root->fs_info, "err add delayed dir index item(index: %llu) "
- "into the deletion tree of the delayed node"
- "(root id: %llu, inode id: %llu, errno: %d)",
- index, node->root->objectid, node->inode_id,
- ret);
+ btrfs_err(root->fs_info,
+ "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
+ index, node->root->objectid, node->inode_id, ret);
BUG();
}
mutex_unlock(&node->mutex);
@@ -1874,7 +1868,8 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
* leads to enospc problems. This means we also can't do
* delayed inode refs
*/
- if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
+ if (test_bit(BTRFS_FS_LOG_RECOVERING,
+ &BTRFS_I(inode)->root->fs_info->flags))
return -EAGAIN;
delayed_node = btrfs_get_or_create_delayed_node(inode);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index ac02e041464b..8d93854a4b4f 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -322,10 +322,11 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
elem = list_first_entry(&fs_info->tree_mod_seq_list,
struct seq_list, list);
if (seq >= elem->seq) {
- pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
- (u32)(seq >> 32), (u32)seq,
- (u32)(elem->seq >> 32), (u32)elem->seq,
- delayed_refs);
+ btrfs_debug(fs_info,
+ "holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
+ (u32)(seq >> 32), (u32)seq,
+ (u32)(elem->seq >> 32), (u32)elem->seq,
+ delayed_refs);
ret = 1;
}
}
@@ -770,7 +771,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
if (!head_ref)
goto free_ref;
- if (fs_info->quota_enabled && is_fstree(ref_root)) {
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+ is_fstree(ref_root)) {
record = kmalloc(sizeof(*record), GFP_NOFS);
if (!record)
goto free_head_ref;
@@ -828,7 +830,8 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
return -ENOMEM;
}
- if (fs_info->quota_enabled && is_fstree(ref_root)) {
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+ is_fstree(ref_root)) {
record = kmalloc(sizeof(*record), GFP_NOFS);
if (!record) {
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index e9bbff3c0029..05169ef30596 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -218,8 +218,9 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
}
ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
if (ret < 0) {
- btrfs_warn(fs_info, "error %d while searching for dev_replace item!",
- ret);
+ btrfs_warn(fs_info,
+ "error %d while searching for dev_replace item!",
+ ret);
goto out;
}
@@ -238,8 +239,9 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
*/
ret = btrfs_del_item(trans, dev_root, path);
if (ret != 0) {
- btrfs_warn(fs_info, "delete too small dev_replace item failed %d!",
- ret);
+ btrfs_warn(fs_info,
+ "delete too small dev_replace item failed %d!",
+ ret);
goto out;
}
ret = 1;
@@ -251,8 +253,8 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, dev_root, path,
&key, sizeof(*ptr));
if (ret < 0) {
- btrfs_warn(fs_info, "insert dev_replace item failed %d!",
- ret);
+ btrfs_warn(fs_info,
+ "insert dev_replace item failed %d!", ret);
goto out;
}
}
@@ -383,7 +385,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
ret = btrfs_sysfs_add_device_link(tgt_device->fs_devices, tgt_device);
if (ret)
- btrfs_err(fs_info, "kobj add dev failed %d\n", ret);
+ btrfs_err(fs_info, "kobj add dev failed %d", ret);
btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1);
@@ -772,9 +774,10 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
break;
}
if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
- btrfs_info(fs_info, "cannot continue dev_replace, tgtdev is missing");
btrfs_info(fs_info,
- "you may cancel the operation after 'mount -o degraded'");
+ "cannot continue dev_replace, tgtdev is missing");
+ btrfs_info(fs_info,
+ "you may cancel the operation after 'mount -o degraded'");
btrfs_dev_replace_unlock(dev_replace, 1);
return 0;
}
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 1752625fb4dd..0dc1a033275e 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -472,9 +472,10 @@ int verify_dir_item(struct btrfs_root *root,
/* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
if ((btrfs_dir_data_len(leaf, dir_item) +
btrfs_dir_name_len(leaf, dir_item)) > BTRFS_MAX_XATTR_SIZE(root)) {
- btrfs_crit(root->fs_info, "invalid dir item name + data len: %u + %u",
- (unsigned)btrfs_dir_name_len(leaf, dir_item),
- (unsigned)btrfs_dir_data_len(leaf, dir_item));
+ btrfs_crit(root->fs_info,
+ "invalid dir item name + data len: %u + %u",
+ (unsigned)btrfs_dir_name_len(leaf, dir_item),
+ (unsigned)btrfs_dir_data_len(leaf, dir_item));
return 1;
}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 54bc8c7c6bcd..e720d3e6ec20 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -326,8 +326,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
read_extent_buffer(buf, &val, 0, csum_size);
btrfs_warn_rl(fs_info,
- "%s checksum verify failed on %llu wanted %X found %X "
- "level %d",
+ "%s checksum verify failed on %llu wanted %X found %X level %d",
fs_info->sb->s_id, buf->start,
val, found, btrfs_header_level(buf));
if (result != (char *)&inline_result)
@@ -402,7 +401,8 @@ out:
* Return 0 if the superblock checksum type matches the checksum value of that
* algorithm. Pass the raw disk superblock data.
*/
-static int btrfs_check_super_csum(char *raw_disk_sb)
+static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
+ char *raw_disk_sb)
{
struct btrfs_super_block *disk_sb =
(struct btrfs_super_block *)raw_disk_sb;
@@ -428,7 +428,7 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
}
if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
- printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n",
+ btrfs_err(fs_info, "unsupported checksum algorithm %u",
csum_type);
ret = 1;
}
@@ -442,7 +442,7 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
*/
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
struct extent_buffer *eb,
- u64 start, u64 parent_transid)
+ u64 parent_transid)
{
struct extent_io_tree *io_tree;
int failed = 0;
@@ -454,8 +454,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
while (1) {
- ret = read_extent_buffer_pages(io_tree, eb, start,
- WAIT_COMPLETE,
+ ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
btree_get_extent, mirror_num);
if (!ret) {
if (!verify_parent_transid(io_tree, eb,
@@ -547,9 +546,10 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
}
#define CORRUPT(reason, eb, root, slot) \
- btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu," \
- "root=%llu, slot=%d", reason, \
- btrfs_header_bytenr(eb), root->objectid, slot)
+ btrfs_crit(root->fs_info, "corrupt %s, %s: block=%llu," \
+ " root=%llu, slot=%d", \
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",\
+ reason, btrfs_header_bytenr(eb), root->objectid, slot)
static noinline int check_leaf(struct btrfs_root *root,
struct extent_buffer *leaf)
@@ -636,6 +636,10 @@ static noinline int check_leaf(struct btrfs_root *root,
static int check_node(struct btrfs_root *root, struct extent_buffer *node)
{
unsigned long nr = btrfs_header_nritems(node);
+ struct btrfs_key key, next_key;
+ int slot;
+ u64 bytenr;
+ int ret = 0;
if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
btrfs_crit(root->fs_info,
@@ -643,7 +647,26 @@ static int check_node(struct btrfs_root *root, struct extent_buffer *node)
node->start, root->objectid, nr);
return -EIO;
}
- return 0;
+
+ for (slot = 0; slot < nr - 1; slot++) {
+ bytenr = btrfs_node_blockptr(node, slot);
+ btrfs_node_key_to_cpu(node, &key, slot);
+ btrfs_node_key_to_cpu(node, &next_key, slot + 1);
+
+ if (!bytenr) {
+ CORRUPT("invalid item slot", node, root, slot);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
+ CORRUPT("bad key order", node, root, slot);
+ ret = -EIO;
+ goto out;
+ }
+ }
+out:
+ return ret;
}
static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
@@ -1132,7 +1155,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
if (IS_ERR(buf))
return;
read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
- buf, 0, WAIT_NONE, btree_get_extent, 0);
+ buf, WAIT_NONE, btree_get_extent, 0);
free_extent_buffer(buf);
}
@@ -1150,7 +1173,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
- ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
+ ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
btree_get_extent, mirror_num);
if (ret) {
free_extent_buffer(buf);
@@ -1206,7 +1229,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
if (IS_ERR(buf))
return buf;
- ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
+ ret = btree_read_extent_buffer_pages(root, buf, parent_transid);
if (ret) {
free_extent_buffer(buf);
return ERR_PTR(ret);
@@ -1839,7 +1862,7 @@ static int cleaner_kthread(void *arg)
* Do not do anything if we might cause open_ctree() to block
* before we have finished mounting the filesystem.
*/
- if (!root->fs_info->open)
+ if (!test_bit(BTRFS_FS_OPEN, &root->fs_info->flags))
goto sleep;
if (!mutex_trylock(&root->fs_info->cleaner_mutex))
@@ -2332,8 +2355,6 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
fs_info->qgroup_op_tree = RB_ROOT;
INIT_LIST_HEAD(&fs_info->dirty_qgroups);
fs_info->qgroup_seq = 1;
- fs_info->quota_enabled = 0;
- fs_info->pending_quota_state = 0;
fs_info->qgroup_ulist = NULL;
fs_info->qgroup_rescan_running = false;
mutex_init(&fs_info->qgroup_rescan_lock);
@@ -2518,8 +2539,7 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
root = btrfs_read_tree_root(tree_root, &location);
if (!IS_ERR(root)) {
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
- fs_info->quota_enabled = 1;
- fs_info->pending_quota_state = 1;
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
fs_info->quota_root = root;
}
@@ -2710,8 +2730,7 @@ int open_ctree(struct super_block *sb,
extent_io_tree_init(&fs_info->freed_extents[1],
fs_info->btree_inode->i_mapping);
fs_info->pinned_extents = &fs_info->freed_extents[0];
- fs_info->do_barriers = 1;
-
+ set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
mutex_init(&fs_info->ordered_operations_mutex);
mutex_init(&fs_info->tree_log_mutex);
@@ -2762,7 +2781,7 @@ int open_ctree(struct super_block *sb,
* We want to check superblock checksum, the type is stored inside.
* Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
*/
- if (btrfs_check_super_csum(bh->b_data)) {
+ if (btrfs_check_super_csum(fs_info, bh->b_data)) {
btrfs_err(fs_info, "superblock checksum mismatch");
err = -EINVAL;
brelse(bh);
@@ -3199,10 +3218,9 @@ retry_root_backup:
return ret;
}
} else {
- fs_info->update_uuid_tree_gen = 1;
+ set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
}
-
- fs_info->open = 1;
+ set_bit(BTRFS_FS_OPEN, &fs_info->flags);
/*
* backuproot only affect mount behavior, and if open_ctree succeeded,
@@ -3607,7 +3625,7 @@ int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
}
if (min_tolerated == INT_MAX) {
- pr_warn("BTRFS: unknown raid flag: %llu\n", flags);
+ pr_warn("BTRFS: unknown raid flag: %llu", flags);
min_tolerated = 0;
}
@@ -3893,8 +3911,7 @@ void close_ctree(struct btrfs_root *root)
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
- fs_info->closing = 1;
- smp_mb();
+ set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
/* wait for the qgroup rescan worker to stop */
btrfs_qgroup_wait_for_completion(fs_info, false);
@@ -3939,8 +3956,7 @@ void close_ctree(struct btrfs_root *root)
kthread_stop(fs_info->transaction_kthread);
kthread_stop(fs_info->cleaner_kthread);
- fs_info->closing = 2;
- smp_mb();
+ set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
btrfs_free_qgroup_config(fs_info);
@@ -3965,7 +3981,7 @@ void close_ctree(struct btrfs_root *root)
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
btrfs_stop_all_workers(fs_info);
- fs_info->open = 0;
+ clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
free_root_pointers(fs_info, 1);
iput(fs_info->btree_inode);
@@ -4036,8 +4052,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
root = BTRFS_I(buf->pages[0]->mapping->host)->root;
btrfs_assert_tree_locked(buf);
if (transid != root->fs_info->generation)
- WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
- "found %llu running %llu\n",
+ WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
buf->start, transid, root->fs_info->generation);
was_dirty = set_extent_buffer_dirty(buf);
if (!was_dirty)
@@ -4088,7 +4103,7 @@ void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
- return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
+ return btree_read_extent_buffer_pages(root, buf, parent_transid);
}
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
@@ -4100,24 +4115,24 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int ret = 0;
if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
- printk(KERN_ERR "BTRFS: no valid FS found\n");
+ btrfs_err(fs_info, "no valid FS found");
ret = -EINVAL;
}
if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
- printk(KERN_WARNING "BTRFS: unrecognized super flag: %llu\n",
+ btrfs_warn(fs_info, "unrecognized super flag: %llu",
btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
- printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
+ btrfs_err(fs_info, "tree_root level too big: %d >= %d",
btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
ret = -EINVAL;
}
if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
- printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n",
+ btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
ret = -EINVAL;
}
if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
- printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n",
+ btrfs_err(fs_info, "log_root level too big: %d >= %d",
btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
ret = -EINVAL;
}
@@ -4128,47 +4143,48 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
*/
if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
- printk(KERN_ERR "BTRFS: invalid sectorsize %llu\n", sectorsize);
+ btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
ret = -EINVAL;
}
/* Only PAGE SIZE is supported yet */
if (sectorsize != PAGE_SIZE) {
- printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n",
- sectorsize, PAGE_SIZE);
+ btrfs_err(fs_info,
+ "sectorsize %llu not supported yet, only support %lu",
+ sectorsize, PAGE_SIZE);
ret = -EINVAL;
}
if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
- printk(KERN_ERR "BTRFS: invalid nodesize %llu\n", nodesize);
+ btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
ret = -EINVAL;
}
if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
- printk(KERN_ERR "BTRFS: invalid leafsize %u, should be %llu\n",
- le32_to_cpu(sb->__unused_leafsize),
- nodesize);
+ btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
+ le32_to_cpu(sb->__unused_leafsize), nodesize);
ret = -EINVAL;
}
/* Root alignment check */
if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
- printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
- btrfs_super_root(sb));
+ btrfs_warn(fs_info, "tree_root block unaligned: %llu",
+ btrfs_super_root(sb));
ret = -EINVAL;
}
if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
- printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n",
- btrfs_super_chunk_root(sb));
+ btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
+ btrfs_super_chunk_root(sb));
ret = -EINVAL;
}
if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
- printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
- btrfs_super_log_root(sb));
+ btrfs_warn(fs_info, "log_root block unaligned: %llu",
+ btrfs_super_log_root(sb));
ret = -EINVAL;
}
if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
- printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
- fs_info->fsid, sb->dev_item.fsid);
+ btrfs_err(fs_info,
+ "dev_item UUID does not match fsid: %pU != %pU",
+ fs_info->fsid, sb->dev_item.fsid);
ret = -EINVAL;
}
@@ -4178,25 +4194,25 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
*/
if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
btrfs_err(fs_info, "bytes_used is too small %llu",
- btrfs_super_bytes_used(sb));
+ btrfs_super_bytes_used(sb));
ret = -EINVAL;
}
if (!is_power_of_2(btrfs_super_stripesize(sb))) {
btrfs_err(fs_info, "invalid stripesize %u",
- btrfs_super_stripesize(sb));
+ btrfs_super_stripesize(sb));
ret = -EINVAL;
}
if (btrfs_super_num_devices(sb) > (1UL << 31))
- printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
- btrfs_super_num_devices(sb));
+ btrfs_warn(fs_info, "suspicious number of devices: %llu",
+ btrfs_super_num_devices(sb));
if (btrfs_super_num_devices(sb) == 0) {
- printk(KERN_ERR "BTRFS: number of devices is 0\n");
+ btrfs_err(fs_info, "number of devices is 0");
ret = -EINVAL;
}
if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
- printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
- btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
+ btrfs_err(fs_info, "super offset mismatch %llu != %u",
+ btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
ret = -EINVAL;
}
@@ -4205,17 +4221,17 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
* and one chunk
*/
if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
- printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n",
- btrfs_super_sys_array_size(sb),
- BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
+ btrfs_err(fs_info, "system chunk array too big %u > %u",
+ btrfs_super_sys_array_size(sb),
+ BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
ret = -EINVAL;
}
if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
+ sizeof(struct btrfs_chunk)) {
- printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n",
- btrfs_super_sys_array_size(sb),
- sizeof(struct btrfs_disk_key)
- + sizeof(struct btrfs_chunk));
+ btrfs_err(fs_info, "system chunk array too small %u < %zu",
+ btrfs_super_sys_array_size(sb),
+ sizeof(struct btrfs_disk_key)
+ + sizeof(struct btrfs_chunk));
ret = -EINVAL;
}
@@ -4224,14 +4240,16 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
* but it's still possible that it's the one that's wrong.
*/
if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
- printk(KERN_WARNING
- "BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
- btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb));
+ btrfs_warn(fs_info,
+ "suspicious: generation < chunk_root_generation: %llu < %llu",
+ btrfs_super_generation(sb),
+ btrfs_super_chunk_root_generation(sb));
if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
&& btrfs_super_cache_generation(sb) != (u64)-1)
- printk(KERN_WARNING
- "BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
- btrfs_super_generation(sb), btrfs_super_cache_generation(sb));
+ btrfs_warn(fs_info,
+ "suspicious: generation < cache_generation: %llu < %llu",
+ btrfs_super_generation(sb),
+ btrfs_super_cache_generation(sb));
return ret;
}
@@ -4475,9 +4493,80 @@ again:
return 0;
}
+static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
+{
+ struct inode *inode;
+
+ inode = cache->io_ctl.inode;
+ if (inode) {
+ invalidate_inode_pages2(inode->i_mapping);
+ BTRFS_I(inode)->generation = 0;
+ cache->io_ctl.inode = NULL;
+ iput(inode);
+ }
+ btrfs_put_block_group(cache);
+}
+
+void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
+ struct btrfs_root *root)
+{
+ struct btrfs_block_group_cache *cache;
+
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ while (!list_empty(&cur_trans->dirty_bgs)) {
+ cache = list_first_entry(&cur_trans->dirty_bgs,
+ struct btrfs_block_group_cache,
+ dirty_list);
+ if (!cache) {
+ btrfs_err(root->fs_info,
+ "orphan block group dirty_bgs list");
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ return;
+ }
+
+ if (!list_empty(&cache->io_list)) {
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ list_del_init(&cache->io_list);
+ btrfs_cleanup_bg_io(cache);
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ }
+
+ list_del_init(&cache->dirty_list);
+ spin_lock(&cache->lock);
+ cache->disk_cache_state = BTRFS_DC_ERROR;
+ spin_unlock(&cache->lock);
+
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ btrfs_put_block_group(cache);
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ }
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+
+ while (!list_empty(&cur_trans->io_bgs)) {
+ cache = list_first_entry(&cur_trans->io_bgs,
+ struct btrfs_block_group_cache,
+ io_list);
+ if (!cache) {
+ btrfs_err(root->fs_info,
+ "orphan block group on io_bgs list");
+ return;
+ }
+
+ list_del_init(&cache->io_list);
+ spin_lock(&cache->lock);
+ cache->disk_cache_state = BTRFS_DC_ERROR;
+ spin_unlock(&cache->lock);
+ btrfs_cleanup_bg_io(cache);
+ }
+}
+
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
struct btrfs_root *root)
{
+ btrfs_cleanup_dirty_bgs(cur_trans, root);
+ ASSERT(list_empty(&cur_trans->dirty_bgs));
+ ASSERT(list_empty(&cur_trans->io_bgs));
+
btrfs_destroy_delayed_refs(cur_trans, root);
cur_trans->state = TRANS_STATE_COMMIT_START;
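
Besides the message conversions, the disk-io.c hunks add two defensive pieces: check_node() now rejects an internal node whose keys are not strictly ascending or whose checked child block pointers are zero, and btrfs_cleanup_one_transaction() gains btrfs_cleanup_dirty_bgs() so block groups still sitting on the transaction's dirty_bgs and io_bgs lists are released during abort cleanup. The ordering invariant check_node() enforces can be restated compactly; the helper below is a sketch of the same check, not the kernel function (btrfs_comp_cpu_keys() compares objectid, then type, then offset, memcmp-style):

	#include "ctree.h"	/* btrfs-internal header for the accessors used below */

	/* true iff the node's keys are strictly ascending and every checked
	 * slot has a non-zero child block pointer */
	static bool node_keys_sane(struct extent_buffer *node)
	{
		struct btrfs_key key, next_key;
		u32 nr = btrfs_header_nritems(node);
		u32 slot;

		for (slot = 0; slot + 1 < nr; slot++) {
			if (btrfs_node_blockptr(node, slot) == 0)
				return false;	/* corrupt: missing child pointer */
			btrfs_node_key_to_cpu(node, &key, slot);
			btrfs_node_key_to_cpu(node, &next_key, slot + 1);
			if (btrfs_comp_cpu_keys(&key, &next_key) >= 0)
				return false;	/* corrupt: keys out of order */
		}
		return true;
	}
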
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index f19a982f5a4f..1a3237e5700f 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -136,6 +136,8 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
+void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *trans,
+ struct btrfs_root *root);
void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
struct btrfs_root *root);
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 665da8f66ff1..210c94ac8818 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -87,7 +87,8 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
int force);
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key);
-static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
+static void dump_space_info(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *info, u64 bytes,
int dump_block_groups);
static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
u64 ram_bytes, u64 num_bytes, int delalloc);
@@ -266,9 +267,8 @@ static int exclude_super_stripes(struct btrfs_root *root,
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
- ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
- cache->key.objectid, bytenr,
- 0, &logical, &nr, &stripe_len);
+ ret = btrfs_rmap_block(root->fs_info, cache->key.objectid,
+ bytenr, 0, &logical, &nr, &stripe_len);
if (ret)
return ret;
@@ -730,11 +730,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
- struct btrfs_block_group_cache *cache;
-
- cache = block_group_cache_tree_search(info, bytenr, 0);
-
- return cache;
+ return block_group_cache_tree_search(info, bytenr, 0);
}
/*
@@ -744,11 +740,7 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(
struct btrfs_fs_info *info,
u64 bytenr)
{
- struct btrfs_block_group_cache *cache;
-
- cache = block_group_cache_tree_search(info, bytenr, 1);
-
- return cache;
+ return block_group_cache_tree_search(info, bytenr, 1);
}
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
@@ -2360,7 +2352,13 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
ins.type = BTRFS_EXTENT_ITEM_KEY;
}
- BUG_ON(node->ref_mod != 1);
+ if (node->ref_mod != 1) {
+ btrfs_err(root->fs_info,
+ "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
+ node->bytenr, node->ref_mod, node->action, ref_root,
+ parent);
+ return -EIO;
+ }
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
BUG_ON(!extent_op || !extent_op->update_flags);
ret = alloc_reserved_tree_block(trans, root,
@@ -2590,7 +2588,9 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (must_insert_reserved)
locked_ref->must_insert_reserved = 1;
locked_ref->processing = 0;
- btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
+ btrfs_debug(fs_info,
+ "run_delayed_extent_op returned %d",
+ ret);
btrfs_delayed_ref_unlock(locked_ref);
return ret;
}
@@ -2650,7 +2650,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
locked_ref->processing = 0;
btrfs_delayed_ref_unlock(locked_ref);
btrfs_put_delayed_ref(ref);
- btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
+ btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
+ ret);
return ret;
}
@@ -2940,7 +2941,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (trans->aborted)
return 0;
- if (root->fs_info->creating_free_space_tree)
+ if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &root->fs_info->flags))
return 0;
if (root == root->fs_info->extent_root)
@@ -2971,7 +2972,6 @@ again:
spin_unlock(&delayed_refs->lock);
goto out;
}
- count = (unsigned long)-1;
while (node) {
head = rb_entry(node, struct btrfs_delayed_ref_head,
@@ -3694,6 +3694,8 @@ again:
goto again;
}
spin_unlock(&cur_trans->dirty_bgs_lock);
+ } else if (ret < 0) {
+ btrfs_cleanup_dirty_bgs(cur_trans, root);
}
btrfs_free_path(path);
@@ -4429,7 +4431,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
if (left < thresh && btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
left, thresh, type);
- dump_space_info(info, 0, 0);
+ dump_space_info(root->fs_info, info, 0, 0);
}
if (left < thresh) {
@@ -5186,7 +5188,7 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
* which means we won't have fs_info->fs_root set, so don't do
* the async reclaim as we will panic.
*/
- if (!root->fs_info->log_root_recovering &&
+ if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags) &&
need_do_async_reclaim(space_info, root, used) &&
!work_busy(&root->fs_info->async_reclaim_work)) {
trace_btrfs_trigger_flush(root->fs_info,
@@ -5792,7 +5794,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
int ret;
struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
- if (root->fs_info->quota_enabled) {
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
/* One for parent inode, two for dir entries */
num_bytes = 3 * root->nodesize;
ret = btrfs_qgroup_reserve_meta(root, num_bytes);
@@ -5970,7 +5972,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
- if (root->fs_info->quota_enabled) {
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
ret = btrfs_qgroup_reserve_meta(root,
nr_extents * root->nodesize);
if (ret)
@@ -6110,8 +6112,6 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
* @start: start range we are writing to
* @len: how long the range we are writing to
*
- * TODO: This function will finally replace old btrfs_delalloc_reserve_space()
- *
* This will do the following things
*
* o reserve space in data space info for num bytes
@@ -6930,8 +6930,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
}
if (ret) {
- btrfs_err(info, "umm, got %d back from search, was looking for %llu",
- ret, bytenr);
+ btrfs_err(info,
+ "umm, got %d back from search, was looking for %llu",
+ ret, bytenr);
if (ret > 0)
btrfs_print_leaf(extent_root,
path->nodes[0]);
@@ -6977,7 +6978,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, extent_root, &key, path,
-1, 1);
if (ret) {
- btrfs_err(info, "umm, got %d back from search, was looking for %llu",
+ btrfs_err(info,
+ "umm, got %d back from search, was looking for %llu",
ret, bytenr);
btrfs_print_leaf(extent_root, path->nodes[0]);
}
@@ -7004,8 +7006,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
refs = btrfs_extent_refs(leaf, ei);
if (refs < refs_to_drop) {
- btrfs_err(info, "trying to drop %d refs but we only have %Lu "
- "for bytenr %Lu", refs_to_drop, refs, bytenr);
+ btrfs_err(info,
+ "trying to drop %d refs but we only have %Lu for bytenr %Lu",
+ refs_to_drop, refs, bytenr);
ret = -EINVAL;
btrfs_abort_transaction(trans, ret);
goto out;
@@ -7901,23 +7904,24 @@ out:
return ret;
}
-static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
+static void dump_space_info(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *info, u64 bytes,
int dump_block_groups)
{
struct btrfs_block_group_cache *cache;
int index = 0;
spin_lock(&info->lock);
- printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
- info->flags,
- info->total_bytes - info->bytes_used - info->bytes_pinned -
- info->bytes_reserved - info->bytes_readonly -
- info->bytes_may_use, (info->full) ? "" : "not ");
- printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
- "reserved=%llu, may_use=%llu, readonly=%llu\n",
- info->total_bytes, info->bytes_used, info->bytes_pinned,
- info->bytes_reserved, info->bytes_may_use,
- info->bytes_readonly);
+ btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
+ info->flags,
+ info->total_bytes - info->bytes_used - info->bytes_pinned -
+ info->bytes_reserved - info->bytes_readonly -
+ info->bytes_may_use, (info->full) ? "" : "not ");
+ btrfs_info(fs_info,
+ "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
+ info->total_bytes, info->bytes_used, info->bytes_pinned,
+ info->bytes_reserved, info->bytes_may_use,
+ info->bytes_readonly);
spin_unlock(&info->lock);
if (!dump_block_groups)
@@ -7927,12 +7931,11 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
again:
list_for_each_entry(cache, &info->block_groups[index], list) {
spin_lock(&cache->lock);
- printk(KERN_INFO "BTRFS: "
- "block group %llu has %llu bytes, "
- "%llu used %llu pinned %llu reserved %s\n",
- cache->key.objectid, cache->key.offset,
- btrfs_block_group_used(&cache->item), cache->pinned,
- cache->reserved, cache->ro ? "[readonly]" : "");
+ btrfs_info(fs_info,
+ "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
+ cache->key.objectid, cache->key.offset,
+ btrfs_block_group_used(&cache->item), cache->pinned,
+ cache->reserved, cache->ro ? "[readonly]" : "");
btrfs_dump_free_space(cache, bytes);
spin_unlock(&cache->lock);
}
@@ -7946,6 +7949,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
u64 empty_size, u64 hint_byte,
struct btrfs_key *ins, int is_data, int delalloc)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
bool final_tried = num_bytes == min_alloc_size;
u64 flags;
int ret;
@@ -7956,8 +7960,7 @@ again:
ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
hint_byte, ins, flags, delalloc);
if (!ret && !is_data) {
- btrfs_dec_block_group_reservations(root->fs_info,
- ins->objectid);
+ btrfs_dec_block_group_reservations(fs_info, ins->objectid);
} else if (ret == -ENOSPC) {
if (!final_tried && ins->offset) {
num_bytes = min(num_bytes >> 1, ins->offset);
@@ -7967,14 +7970,15 @@ again:
if (num_bytes == min_alloc_size)
final_tried = true;
goto again;
- } else if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
+ } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
struct btrfs_space_info *sinfo;
- sinfo = __find_space_info(root->fs_info, flags);
- btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
- flags, num_bytes);
+ sinfo = __find_space_info(fs_info, flags);
+ btrfs_err(root->fs_info,
+ "allocation failed flags %llu, wanted %llu",
+ flags, num_bytes);
if (sinfo)
- dump_space_info(sinfo, num_bytes, 1);
+ dump_space_info(fs_info, sinfo, num_bytes, 1);
}
}
@@ -8462,7 +8466,6 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
u64 refs;
u64 flags;
u32 nritems;
- u32 blocksize;
struct btrfs_key key;
struct extent_buffer *eb;
int ret;
@@ -8480,7 +8483,6 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
eb = path->nodes[wc->level];
nritems = btrfs_header_nritems(eb);
- blocksize = root->nodesize;
for (slot = path->slots[wc->level]; slot < nritems; slot++) {
if (nread >= wc->reada_count)
@@ -8544,7 +8546,7 @@ static int account_leaf_items(struct btrfs_trans_handle *trans,
u64 bytenr, num_bytes;
/* We can be called directly from walk_up_proc() */
- if (!root->fs_info->quota_enabled)
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
return 0;
for (i = 0; i < nr; i++) {
@@ -8653,7 +8655,7 @@ static int account_shared_subtree(struct btrfs_trans_handle *trans,
BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
BUG_ON(root_eb == NULL);
- if (!root->fs_info->quota_enabled)
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
return 0;
if (!extent_buffer_uptodate(root_eb)) {
@@ -8884,14 +8886,13 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
&wc->refs[level - 1],
&wc->flags[level - 1]);
- if (ret < 0) {
- btrfs_tree_unlock(next);
- return ret;
- }
+ if (ret < 0)
+ goto out_unlock;
if (unlikely(wc->refs[level - 1] == 0)) {
btrfs_err(root->fs_info, "Missing references.");
- BUG();
+ ret = -EIO;
+ goto out_unlock;
}
*lookup_info = 0;
@@ -8943,7 +8944,12 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
}
level--;
- BUG_ON(level != btrfs_header_level(next));
+ ASSERT(level == btrfs_header_level(next));
+ if (level != btrfs_header_level(next)) {
+ btrfs_err(root->fs_info, "mismatched level");
+ ret = -EIO;
+ goto out_unlock;
+ }
path->nodes[level] = next;
path->slots[level] = 0;
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
@@ -8958,8 +8964,15 @@ skip:
if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
parent = path->nodes[level]->start;
} else {
- BUG_ON(root->root_key.objectid !=
+ ASSERT(root->root_key.objectid ==
btrfs_header_owner(path->nodes[level]));
+ if (root->root_key.objectid !=
+ btrfs_header_owner(path->nodes[level])) {
+ btrfs_err(root->fs_info,
+ "mismatched block owner");
+ ret = -EIO;
+ goto out_unlock;
+ }
parent = 0;
}
@@ -8968,20 +8981,24 @@ skip:
generation, level - 1);
if (ret) {
btrfs_err_rl(root->fs_info,
- "Error "
- "%d accounting shared subtree. Quota "
- "is out of sync, rescan required.",
- ret);
+ "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
+ ret);
}
}
ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
root->root_key.objectid, level - 1, 0);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret)
+ goto out_unlock;
}
+
+ *lookup_info = 1;
+ ret = 1;
+
+out_unlock:
btrfs_tree_unlock(next);
free_extent_buffer(next);
- *lookup_info = 1;
- return 1;
+
+ return ret;
}
/*
@@ -9061,10 +9078,8 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
ret = account_leaf_items(trans, root, eb);
if (ret) {
btrfs_err_rl(root->fs_info,
- "error "
- "%d accounting leaf items. Quota "
- "is out of sync, rescan required.",
- ret);
+ "error %d accounting leaf items. Quota is out of sync, rescan required.",
+ ret);
}
}
/* make block locked assertion in clean_tree_block happy */
@@ -9180,9 +9195,10 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, int update_ref,
int for_reloc)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct btrfs_trans_handle *trans;
- struct btrfs_root *tree_root = root->fs_info->tree_root;
+ struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root_item *root_item = &root->root_item;
struct walk_control *wc;
struct btrfs_key key;
@@ -9191,7 +9207,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
int level;
bool root_dropped = false;
- btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
+ btrfs_debug(fs_info, "Drop subvolume %llu", root->objectid);
path = btrfs_alloc_path();
if (!path) {
@@ -9320,7 +9336,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
btrfs_end_transaction_throttle(trans, tree_root);
if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
- pr_debug("BTRFS: drop snapshot early exit\n");
+ btrfs_debug(fs_info,
+ "drop snapshot early exit");
err = -EAGAIN;
goto out_free;
}
@@ -9386,7 +9403,7 @@ out:
if (!for_reloc && root_dropped == false)
btrfs_add_dead_root(root);
if (err && err != -EAGAIN)
- btrfs_handle_fs_error(root->fs_info, err, NULL);
+ btrfs_handle_fs_error(fs_info, err, NULL);
return err;
}
@@ -10020,7 +10037,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
if (WARN_ON(space_info->bytes_pinned > 0 ||
space_info->bytes_reserved > 0 ||
space_info->bytes_may_use > 0))
- dump_space_info(space_info, 0, 0);
+ dump_space_info(info, space_info, 0, 0);
list_del(&space_info->list);
for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
struct kobject *kobj;
@@ -10069,7 +10086,8 @@ static void __link_block_group(struct btrfs_space_info *space_info,
return;
out_err:
- pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
+ btrfs_warn(cache->fs_info,
+ "failed to add kobject for block cache, ignoring");
}
static struct btrfs_block_group_cache *
@@ -10127,6 +10145,11 @@ int btrfs_read_block_groups(struct btrfs_root *root)
struct extent_buffer *leaf;
int need_clear = 0;
u64 cache_gen;
+ u64 feature;
+ int mixed;
+
+ feature = btrfs_super_incompat_flags(info->super_copy);
+ mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
root = info->extent_root;
key.objectid = 0;
@@ -10180,6 +10203,15 @@ int btrfs_read_block_groups(struct btrfs_root *root)
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(cache->item));
cache->flags = btrfs_block_group_flags(&cache->item);
+ if (!mixed &&
+ ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
+ (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
+ btrfs_err(info,
+"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
+ cache->key.objectid);
+ ret = -EINVAL;
+ goto error;
+ }
key.objectid = found_key.objectid + found_key.offset;
btrfs_release_path(path);
@@ -10789,7 +10821,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
struct btrfs_trans_handle *trans;
int ret = 0;
- if (!fs_info->open)
+ if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
return;
spin_lock(&fs_info->unused_bgs_lock);
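
The extent-tree.c hunks convert several crash-on-corruption sites into recoverable errors: run_delayed_tree_ref() reports an unexpected ref_mod and returns -EIO instead of hitting BUG_ON(), and do_walk_down() funnels its failure paths through a single out_unlock label that drops the tree lock and the extent buffer reference before propagating the error (the old early return unlocked but never freed "next"). The shape of that unwinding, heavily simplified (example_lookup_refs() is a placeholder, and the real function keeps the lock and buffer when it actually descends a level):

	#include "ctree.h"	/* btrfs-internal headers for the types used below */
	#include "locking.h"

	/* placeholder for the lookup step; not a real btrfs function */
	static int example_lookup_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *eb);

	static int walk_child(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *next)
	{
		int ret;

		btrfs_tree_lock(next);

		ret = example_lookup_refs(trans, root, next);
		if (ret < 0)
			goto out_unlock;	/* old code unlocked and returned here, leaking next */

		ret = 1;			/* done with this child, keep walking */

	out_unlock:
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		return ret;
	}
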
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 44fe66b53c8b..ee40384c394d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -20,6 +20,7 @@
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
+#include "transaction.h"
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
@@ -74,8 +75,7 @@ void btrfs_leak_debug_check(void)
while (!list_empty(&buffers)) {
eb = list_entry(buffers.next, struct extent_buffer, leak_list);
- printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
- "refs %d\n",
+ pr_err("BTRFS: buffer leak start %llu len %lu refs %d\n",
eb->start, eb->len, atomic_read(&eb->refs));
list_del(&eb->leak_list);
kmem_cache_free(extent_buffer_cache, eb);
@@ -460,8 +460,7 @@ static int insert_state(struct extent_io_tree *tree,
if (node) {
struct extent_state *found;
found = rb_entry(node, struct extent_state, rb_node);
- printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
- "%llu %llu\n",
+ pr_err("BTRFS: found node %llu %llu on insert of %llu %llu\n",
found->start, found->end, start, end);
return -EEXIST;
}
@@ -572,9 +571,8 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
- btrfs_panic(tree_fs_info(tree), err, "Locking error: "
- "Extent tree was modified by another "
- "thread while locked.");
+ btrfs_panic(tree_fs_info(tree), err,
+ "Locking error: Extent tree was modified by another thread while locked.");
}
/*
@@ -1729,7 +1727,7 @@ out_failed:
}
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
- struct page *locked_page,
+ u64 delalloc_end, struct page *locked_page,
unsigned clear_bits,
unsigned long page_ops)
{
@@ -2122,8 +2120,9 @@ int clean_io_failure(struct inode *inode, u64 start, struct page *page,
if (failrec->in_validation) {
/* there was no real error, just free the record */
- pr_debug("clean_io_failure: freeing dummy error at %llu\n",
- failrec->start);
+ btrfs_debug(fs_info,
+ "clean_io_failure: freeing dummy error at %llu",
+ failrec->start);
goto out;
}
if (fs_info->sb->s_flags & MS_RDONLY)
@@ -2189,6 +2188,7 @@ void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
struct io_failure_record **failrec_ret)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct io_failure_record *failrec;
struct extent_map *em;
struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
@@ -2236,8 +2236,9 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
em->compress_type);
}
- pr_debug("Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu\n",
- logical, start, failrec->len);
+ btrfs_debug(fs_info,
+ "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
+ logical, start, failrec->len);
failrec->logical = logical;
free_extent_map(em);
@@ -2255,9 +2256,10 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
return ret;
}
} else {
- pr_debug("Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d\n",
- failrec->logical, failrec->start, failrec->len,
- failrec->in_validation);
+ btrfs_debug(fs_info,
+ "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
+ failrec->logical, failrec->start, failrec->len,
+ failrec->in_validation);
/*
* when data can be on disk more than twice, add to failrec here
* (e.g. with a list for failed_mirror) to make
@@ -2273,18 +2275,19 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
struct io_failure_record *failrec, int failed_mirror)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int num_copies;
- num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
- failrec->logical, failrec->len);
+ num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
if (num_copies == 1) {
/*
* we only have a single copy of the data, so don't bother with
* all the retry and error correction code that follows. no
* matter what the error is, it is very likely to persist.
*/
- pr_debug("Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
- num_copies, failrec->this_mirror, failed_mirror);
+ btrfs_debug(fs_info,
+ "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
+ num_copies, failrec->this_mirror, failed_mirror);
return 0;
}
@@ -2323,8 +2326,9 @@ int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
}
if (failrec->this_mirror > num_copies) {
- pr_debug("Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
- num_copies, failrec->this_mirror, failed_mirror);
+ btrfs_debug(fs_info,
+ "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
+ num_copies, failrec->this_mirror, failed_mirror);
return 0;
}
@@ -2415,8 +2419,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
}
bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
- pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
- read_mode, failrec->this_mirror, failrec->in_validation);
+ btrfs_debug(btrfs_sb(inode->i_sb),
+ "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
+ read_mode, failrec->this_mirror, failrec->in_validation);
ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
failrec->bio_flags, 0);
@@ -2484,8 +2489,7 @@ static void end_bio_extent_writepage(struct bio *bio)
bvec->bv_offset, bvec->bv_len);
else
btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
- "incomplete page write in btrfs with offset %u and "
- "length %u",
+ "incomplete page write in btrfs with offset %u and length %u",
bvec->bv_offset, bvec->bv_len);
}
@@ -2541,10 +2545,12 @@ static void end_bio_extent_readpage(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
struct inode *inode = page->mapping->host;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
- "mirror=%u\n", (u64)bio->bi_iter.bi_sector,
- bio->bi_error, io_bio->mirror_num);
+ btrfs_debug(fs_info,
+ "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
+ (u64)bio->bi_iter.bi_sector, bio->bi_error,
+ io_bio->mirror_num);
tree = &BTRFS_I(inode)->io_tree;
/* We always issue full-page reads, but if some block
@@ -2554,13 +2560,12 @@ static void end_bio_extent_readpage(struct bio *bio)
* if they don't add up to a full page. */
if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
- btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
- "partial page read in btrfs with offset %u and length %u",
+ btrfs_err(fs_info,
+ "partial page read in btrfs with offset %u and length %u",
bvec->bv_offset, bvec->bv_len);
else
- btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
- "incomplete page read in btrfs with offset %u and "
- "length %u",
+ btrfs_info(fs_info,
+ "incomplete page read in btrfs with offset %u and length %u",
bvec->bv_offset, bvec->bv_len);
}
@@ -3624,7 +3629,6 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb)
static void set_btree_ioerr(struct page *page)
{
struct extent_buffer *eb = (struct extent_buffer *)page->private;
- struct btrfs_inode *btree_ino = BTRFS_I(eb->fs_info->btree_inode);
SetPageError(page);
if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
@@ -3670,13 +3674,13 @@ static void set_btree_ioerr(struct page *page)
*/
switch (eb->log_index) {
case -1:
- set_bit(BTRFS_INODE_BTREE_ERR, &btree_ino->runtime_flags);
+ set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
break;
case 0:
- set_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
+ set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
break;
case 1:
- set_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
+ set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
break;
default:
BUG(); /* unexpected, logic error */
@@ -3721,8 +3725,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
struct block_device *bdev = fs_info->fs_devices->latest_bdev;
struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
u64 offset = eb->start;
+ u32 nritems;
unsigned long i, num_pages;
unsigned long bio_flags = 0;
+ unsigned long start, end;
int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META;
int ret = 0;
@@ -3732,6 +3738,23 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
bio_flags = EXTENT_BIO_TREE_LOG;
+ /* set btree blocks beyond nritems with 0 to avoid stale content. */
+ nritems = btrfs_header_nritems(eb);
+ if (btrfs_header_level(eb) > 0) {
+ end = btrfs_node_key_ptr_offset(nritems);
+
+ memset_extent_buffer(eb, 0, end, eb->len - end);
+ } else {
+ /*
+ * leaf:
+ * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
+ */
+ start = btrfs_item_nr_offset(nritems);
+ end = btrfs_leaf_data(eb) +
+ leaf_data_end(fs_info->tree_root, eb);
+ memset_extent_buffer(eb, 0, start, end - start);
+ }
+
for (i = 0; i < num_pages; i++) {
struct page *p = eb->pages[i];
@@ -4487,21 +4510,36 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
flags |= (FIEMAP_EXTENT_DELALLOC |
FIEMAP_EXTENT_UNKNOWN);
} else if (fieinfo->fi_extents_max) {
+ struct btrfs_trans_handle *trans;
+
u64 bytenr = em->block_start -
(em->start - em->orig_start);
disko = em->block_start + offset_in_extent;
/*
+ * We need a trans handle to get delayed refs
+ */
+ trans = btrfs_join_transaction(root);
+ /*
+ * It's OK if we can't start a trans we can still check
+ * from commit_root
+ */
+ if (IS_ERR(trans))
+ trans = NULL;
+
+ /*
* As btrfs supports shared space, this information
* can be exported to userspace tools via
* flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
* then we're just getting a count and we can skip the
* lookup stuff.
*/
- ret = btrfs_check_shared(NULL, root->fs_info,
+ ret = btrfs_check_shared(trans, root->fs_info,
root->objectid,
btrfs_ino(inode), bytenr);
+ if (trans)
+ btrfs_end_transaction(trans, root);
if (ret < 0)
goto out_free;
if (ret)
@@ -5173,11 +5211,10 @@ int extent_buffer_uptodate(struct extent_buffer *eb)
}
int read_extent_buffer_pages(struct extent_io_tree *tree,
- struct extent_buffer *eb, u64 start, int wait,
+ struct extent_buffer *eb, int wait,
get_extent_t *get_extent, int mirror_num)
{
unsigned long i;
- unsigned long start_i;
struct page *page;
int err;
int ret = 0;
@@ -5191,16 +5228,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
- if (start) {
- WARN_ON(start < eb->start);
- start_i = (start >> PAGE_SHIFT) -
- (eb->start >> PAGE_SHIFT);
- } else {
- start_i = 0;
- }
-
num_pages = num_extent_pages(eb->start, eb->len);
- for (i = start_i; i < num_pages; i++) {
+ for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
if (wait == WAIT_NONE) {
if (!trylock_page(page))
@@ -5209,21 +5238,29 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
lock_page(page);
}
locked_pages++;
+ }
+ /*
+ * We need to firstly lock all pages to make sure that
+ * the uptodate bit of our pages won't be affected by
+ * clear_extent_buffer_uptodate().
+ */
+ for (i = 0; i < num_pages; i++) {
+ page = eb->pages[i];
if (!PageUptodate(page)) {
num_reads++;
all_uptodate = 0;
}
}
+
if (all_uptodate) {
- if (start_i == 0)
- set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
+ set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
goto unlock_exit;
}
clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb->read_mirror = 0;
atomic_set(&eb->io_pages, num_reads);
- for (i = start_i; i < num_pages; i++) {
+ for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
if (!PageUptodate(page)) {
@@ -5264,7 +5301,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
if (ret || wait != WAIT_COMPLETE)
return ret;
- for (i = start_i; i < num_pages; i++) {
+ for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
wait_on_page_locked(page);
if (!PageUptodate(page))
@@ -5274,12 +5311,10 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
return ret;
unlock_exit:
- i = start_i;
while (locked_pages > 0) {
- page = eb->pages[i];
- i++;
- unlock_page(page);
locked_pages--;
+ page = eb->pages[locked_pages];
+ unlock_page(page);
}
return ret;
}
@@ -5382,8 +5417,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
}
if (start + min_len > eb->len) {
- WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
- "wanted %lu %lu\n",
+ WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
eb->start, eb->len, start, min_len);
return -EINVAL;
}
@@ -5713,14 +5747,14 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
if (src_offset + len > dst->len) {
btrfs_err(dst->fs_info,
- "memmove bogus src_offset %lu move "
- "len %lu dst len %lu", src_offset, len, dst->len);
+ "memmove bogus src_offset %lu move len %lu dst len %lu",
+ src_offset, len, dst->len);
BUG_ON(1);
}
if (dst_offset + len > dst->len) {
btrfs_err(dst->fs_info,
- "memmove bogus dst_offset %lu move "
- "len %lu dst len %lu", dst_offset, len, dst->len);
+ "memmove bogus dst_offset %lu move len %lu dst len %lu",
+ dst_offset, len, dst->len);
BUG_ON(1);
}
@@ -5760,13 +5794,15 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_i;
if (src_offset + len > dst->len) {
- btrfs_err(dst->fs_info, "memmove bogus src_offset %lu move "
- "len %lu len %lu", src_offset, len, dst->len);
+ btrfs_err(dst->fs_info,
+ "memmove bogus src_offset %lu move len %lu len %lu",
+ src_offset, len, dst->len);
BUG_ON(1);
}
if (dst_offset + len > dst->len) {
- btrfs_err(dst->fs_info, "memmove bogus dst_offset %lu move "
- "len %lu len %lu", dst_offset, len, dst->len);
+ btrfs_err(dst->fs_info,
+ "memmove bogus dst_offset %lu move len %lu len %lu",
+ dst_offset, len, dst->len);
BUG_ON(1);
}
if (dst_offset < src_offset) {
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 28cd88fccc7e..4a094f1dc7ef 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -359,7 +359,7 @@ void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_COMPLETE 1
#define WAIT_PAGE_LOCK 2
int read_extent_buffer_pages(struct extent_io_tree *tree,
- struct extent_buffer *eb, u64 start, int wait,
+ struct extent_buffer *eb, int wait,
get_extent_t *get_extent, int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
@@ -413,7 +413,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
- struct page *locked_page,
+ u64 delalloc_end, struct page *locked_page,
unsigned bits_to_clear,
unsigned long page_ops);
struct bio *
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 36f4589e349c..3a14c87d9c92 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -503,7 +503,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
end_of_last_block = start_pos + num_bytes - 1;
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
- cached);
+ cached, 0);
if (err)
return err;
@@ -1110,13 +1110,25 @@ again:
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
+ if (key.objectid != ino ||
+ key.type != BTRFS_EXTENT_DATA_KEY) {
+ ret = -EINVAL;
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- BUG_ON(btrfs_file_extent_type(leaf, fi) !=
- BTRFS_FILE_EXTENT_PREALLOC);
+ if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
+ ret = -EINVAL;
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
- BUG_ON(key.offset > start || extent_end < end);
+ if (key.offset > start || extent_end < end) {
+ ret = -EINVAL;
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
@@ -1213,12 +1225,19 @@ again:
ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
root->root_key.objectid,
ino, orig_offset);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
if (split == start) {
key.offset = start;
} else {
- BUG_ON(start != key.offset);
+ if (start != key.offset) {
+ ret = -EINVAL;
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
path->slots[0]--;
extent_end = end;
}
@@ -1240,7 +1259,10 @@ again:
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
}
other_start = 0;
other_end = start;
@@ -1257,7 +1279,10 @@ again:
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
}
if (del_nr == 0) {
fi = btrfs_item_ptr(leaf, path->slots[0],
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index d571bd2b697b..e4b48f377d3a 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -716,8 +716,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
if (BTRFS_I(inode)->generation != generation) {
btrfs_err(root->fs_info,
- "free space inode generation (%llu) "
- "did not match free space cache generation (%llu)",
+ "free space inode generation (%llu) did not match free space cache generation (%llu)",
BTRFS_I(inode)->generation, generation);
return 0;
}
@@ -879,8 +878,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
if (!matched) {
__btrfs_remove_free_space_cache(ctl);
- btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
- block_group->key.objectid);
+ btrfs_warn(fs_info,
+ "block group %llu has wrong amount of free space",
+ block_group->key.objectid);
ret = -1;
}
out:
@@ -891,8 +891,9 @@ out:
spin_unlock(&block_group->lock);
ret = 0;
- btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
- block_group->key.objectid);
+ btrfs_warn(fs_info,
+ "failed to load free space cache for block group %llu, rebuilding it now",
+ block_group->key.objectid);
}
iput(inode);
@@ -2298,7 +2299,8 @@ static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
}
}
-int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
+int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
+ struct btrfs_free_space_ctl *ctl,
u64 offset, u64 bytes)
{
struct btrfs_free_space *info;
@@ -2345,7 +2347,7 @@ out:
spin_unlock(&ctl->tree_lock);
if (ret) {
- printk(KERN_CRIT "BTRFS: unable to add free space :%d\n", ret);
+ btrfs_crit(fs_info, "unable to add free space :%d", ret);
ASSERT(ret != -EEXIST);
}
@@ -2621,7 +2623,8 @@ out:
spin_unlock(&ctl->tree_lock);
if (align_gap_len)
- __btrfs_add_free_space(ctl, align_gap, align_gap_len);
+ __btrfs_add_free_space(block_group->fs_info, ctl,
+ align_gap, align_gap_len);
return ret;
}
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 3af651c2bbc7..363fdd955e5d 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -89,13 +89,15 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct inode *inode);
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
-int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
+int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
+ struct btrfs_free_space_ctl *ctl,
u64 bytenr, u64 size);
static inline int
btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
u64 bytenr, u64 size)
{
- return __btrfs_add_free_space(block_group->free_space_ctl,
+ return __btrfs_add_free_space(block_group->fs_info,
+ block_group->free_space_ctl,
bytenr, size);
}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 87e7e3d3e676..e4a42a8e4f84 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -107,7 +107,7 @@ search_free_space_info(struct btrfs_trans_handle *trans,
if (ret < 0)
return ERR_PTR(ret);
if (ret != 0) {
- btrfs_warn(fs_info, "missing free space info for %llu\n",
+ btrfs_warn(fs_info, "missing free space info for %llu",
block_group->key.objectid);
ASSERT(0);
return ERR_PTR(-ENOENT);
@@ -261,7 +261,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
if (extent_count != expected_extent_count) {
- btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
+ btrfs_err(fs_info,
+ "incorrect extent count for %llu; counted %u, expected %u",
block_group->key.objectid, extent_count,
expected_extent_count);
ASSERT(0);
@@ -442,7 +443,8 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
}
if (extent_count != expected_extent_count) {
- btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
+ btrfs_err(fs_info,
+ "incorrect extent count for %llu; counted %u, expected %u",
block_group->key.objectid, extent_count,
expected_extent_count);
ASSERT(0);
@@ -1163,7 +1165,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
if (IS_ERR(trans))
return PTR_ERR(trans);
- fs_info->creating_free_space_tree = 1;
+ set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
free_space_root = btrfs_create_tree(trans, fs_info,
BTRFS_FREE_SPACE_TREE_OBJECTID);
if (IS_ERR(free_space_root)) {
@@ -1183,7 +1185,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
}
btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
- fs_info->creating_free_space_tree = 0;
+ clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
ret = btrfs_commit_transaction(trans, tree_root);
if (ret)
@@ -1192,7 +1194,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
return 0;
abort:
- fs_info->creating_free_space_tree = 0;
+ clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, tree_root);
return ret;
@@ -1480,7 +1482,8 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
}
if (extent_count != expected_extent_count) {
- btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
+ btrfs_err(fs_info,
+ "incorrect extent count for %llu; counted %u, expected %u",
block_group->key.objectid, extent_count,
expected_extent_count);
ASSERT(0);
@@ -1542,7 +1545,8 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
}
if (extent_count != expected_extent_count) {
- btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
+ btrfs_err(fs_info,
+ "incorrect extent count for %llu; counted %u, expected %u",
block_group->key.objectid, extent_count,
expected_extent_count);
ASSERT(0);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 359ee861b5a4..d27014b8bf72 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -104,7 +104,7 @@ again:
break;
if (last != (u64)-1 && last + 1 != key.objectid) {
- __btrfs_add_free_space(ctl, last + 1,
+ __btrfs_add_free_space(fs_info, ctl, last + 1,
key.objectid - last - 1);
wake_up(&root->ino_cache_wait);
}
@@ -115,7 +115,7 @@ next:
}
if (last < root->highest_objectid - 1) {
- __btrfs_add_free_space(ctl, last + 1,
+ __btrfs_add_free_space(fs_info, ctl, last + 1,
root->highest_objectid - last - 1);
}
@@ -136,12 +136,13 @@ out:
static void start_caching(struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
struct task_struct *tsk;
int ret;
u64 objectid;
- if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return;
spin_lock(&root->ino_cache_lock);
@@ -153,7 +154,7 @@ static void start_caching(struct btrfs_root *root)
root->ino_cache_state = BTRFS_CACHE_STARTED;
spin_unlock(&root->ino_cache_lock);
- ret = load_free_ino_cache(root->fs_info, root);
+ ret = load_free_ino_cache(fs_info, root);
if (ret == 1) {
spin_lock(&root->ino_cache_lock);
root->ino_cache_state = BTRFS_CACHE_FINISHED;
@@ -170,15 +171,15 @@ static void start_caching(struct btrfs_root *root)
*/
ret = btrfs_find_free_objectid(root, &objectid);
if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
- __btrfs_add_free_space(ctl, objectid,
+ __btrfs_add_free_space(fs_info, ctl, objectid,
BTRFS_LAST_FREE_OBJECTID - objectid + 1);
}
tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
root->root_key.objectid);
if (IS_ERR(tsk)) {
- btrfs_warn(root->fs_info, "failed to start inode caching task");
- btrfs_clear_pending_and_info(root->fs_info, INODE_MAP_CACHE,
+ btrfs_warn(fs_info, "failed to start inode caching task");
+ btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
"disabling inode map caching");
}
}
@@ -209,28 +210,29 @@ again:
void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
- if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return;
again:
if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
- __btrfs_add_free_space(pinned, objectid, 1);
+ __btrfs_add_free_space(fs_info, pinned, objectid, 1);
} else {
- down_write(&root->fs_info->commit_root_sem);
+ down_write(&fs_info->commit_root_sem);
spin_lock(&root->ino_cache_lock);
if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
spin_unlock(&root->ino_cache_lock);
- up_write(&root->fs_info->commit_root_sem);
+ up_write(&fs_info->commit_root_sem);
goto again;
}
spin_unlock(&root->ino_cache_lock);
start_caching(root);
- __btrfs_add_free_space(pinned, objectid, 1);
+ __btrfs_add_free_space(fs_info, pinned, objectid, 1);
- up_write(&root->fs_info->commit_root_sem);
+ up_write(&fs_info->commit_root_sem);
}
}
@@ -277,7 +279,8 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
rb_erase(&info->offset_index, rbroot);
spin_unlock(rbroot_lock);
if (add_to_ctl)
- __btrfs_add_free_space(ctl, info->offset, count);
+ __btrfs_add_free_space(root->fs_info, ctl,
+ info->offset, count);
kmem_cache_free(btrfs_free_space_cachep, info);
}
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 22a7ca43c7cd..2b790bda7998 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -560,8 +560,9 @@ cont:
* we don't need to create any more async work items.
* Unlock and free up our temp pages.
*/
- extent_clear_unlock_delalloc(inode, start, end, NULL,
- clear_flags, PAGE_UNLOCK |
+ extent_clear_unlock_delalloc(inode, start, end, end,
+ NULL, clear_flags,
+ PAGE_UNLOCK |
PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK |
page_error_op |
@@ -837,6 +838,8 @@ retry:
extent_clear_unlock_delalloc(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
+ async_extent->start +
+ async_extent->ram_size - 1,
NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK);
@@ -856,7 +859,8 @@ retry:
tree->ops->writepage_end_io_hook(p, start, end,
NULL, 0);
p->mapping = NULL;
- extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
+ extent_clear_unlock_delalloc(inode, start, end, end,
+ NULL, 0,
PAGE_END_WRITEBACK |
PAGE_SET_ERROR);
free_async_extent_pages(async_extent);
@@ -873,6 +877,8 @@ out_free:
extent_clear_unlock_delalloc(inode, async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
+ async_extent->start +
+ async_extent->ram_size - 1,
NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
@@ -966,7 +972,8 @@ static noinline int cow_file_range(struct inode *inode,
ret = cow_file_range_inline(root, inode, start, end, 0, 0,
NULL);
if (ret == 0) {
- extent_clear_unlock_delalloc(inode, start, end, NULL,
+ extent_clear_unlock_delalloc(inode, start, end,
+ delalloc_end, NULL,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DEFRAG, PAGE_UNLOCK |
PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
@@ -1062,7 +1069,8 @@ static noinline int cow_file_range(struct inode *inode,
op |= PAGE_SET_PRIVATE2;
extent_clear_unlock_delalloc(inode, start,
- start + ram_size - 1, locked_page,
+ start + ram_size - 1,
+ delalloc_end, locked_page,
EXTENT_LOCKED | EXTENT_DELALLOC,
op);
disk_num_bytes -= cur_alloc_size;
@@ -1079,7 +1087,8 @@ out_reserve:
btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_unlock:
- extent_clear_unlock_delalloc(inode, start, end, locked_page,
+ extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
+ locked_page,
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
EXTENT_DELALLOC | EXTENT_DEFRAG,
PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
@@ -1258,7 +1267,8 @@ static noinline int run_delalloc_nocow(struct inode *inode,
path = btrfs_alloc_path();
if (!path) {
- extent_clear_unlock_delalloc(inode, start, end, locked_page,
+ extent_clear_unlock_delalloc(inode, start, end, end,
+ locked_page,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, PAGE_UNLOCK |
@@ -1276,7 +1286,8 @@ static noinline int run_delalloc_nocow(struct inode *inode,
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
- extent_clear_unlock_delalloc(inode, start, end, locked_page,
+ extent_clear_unlock_delalloc(inode, start, end, end,
+ locked_page,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, PAGE_UNLOCK |
@@ -1490,7 +1501,7 @@ out_check:
}
extent_clear_unlock_delalloc(inode, cur_offset,
- cur_offset + num_bytes - 1,
+ cur_offset + num_bytes - 1, end,
locked_page, EXTENT_LOCKED |
EXTENT_DELALLOC |
EXTENT_CLEAR_DATA_RESV,
@@ -1522,7 +1533,7 @@ error:
ret = err;
if (ret && cur_offset < end)
- extent_clear_unlock_delalloc(inode, cur_offset, end,
+ extent_clear_unlock_delalloc(inode, cur_offset, end, end,
locked_page, EXTENT_LOCKED |
EXTENT_DELALLOC | EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
@@ -1988,7 +1999,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
}
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
- struct extent_state **cached_state)
+ struct extent_state **cached_state, int dedupe)
{
WARN_ON((end & (PAGE_SIZE - 1)) == 0);
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
@@ -2052,7 +2063,8 @@ again:
goto out;
}
- btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
+ btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state,
+ 0);
ClearPageChecked(page);
set_page_dirty(page);
out:
@@ -2309,7 +2321,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
if (PTR_ERR(root) == -ENOENT)
return 0;
WARN_ON(1);
- pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
+ btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu",
inum, offset, root_id);
return PTR_ERR(root);
}
@@ -3936,7 +3948,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
*/
if (!btrfs_is_free_space_inode(inode)
&& root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
- && !root->fs_info->log_root_recovering) {
+ && !test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
btrfs_update_root_times(trans, root);
ret = btrfs_delayed_update_inode(trans, root, inode);
@@ -4757,7 +4769,7 @@ again:
0, 0, &cached_state, GFP_NOFS);
ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
- &cached_state);
+ &cached_state, 0);
if (ret) {
unlock_extent_cached(io_tree, block_start, block_end,
&cached_state, GFP_NOFS);
@@ -5223,7 +5235,7 @@ void btrfs_evict_inode(struct inode *inode)
btrfs_free_io_failure_record(inode, 0, (u64)-1);
- if (root->fs_info->log_root_recovering) {
+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags));
goto no_delete;
@@ -7012,8 +7024,9 @@ not_found_em:
insert:
btrfs_release_path(path);
if (em->start > start || extent_map_end(em) <= start) {
- btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
- em->start, em->len, start, len);
+ btrfs_err(root->fs_info,
+ "bad extent! em: [%llu %llu] passed [%llu %llu]",
+ em->start, em->len, start, len);
err = -EIO;
goto out;
}
@@ -7865,18 +7878,19 @@ static int btrfs_check_dio_repairable(struct inode *inode,
struct io_failure_record *failrec,
int failed_mirror)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int num_copies;
- num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
- failrec->logical, failrec->len);
+ num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
if (num_copies == 1) {
/*
* we only have a single copy of the data, so don't bother with
* all the retry and error correction code that follows. no
* matter what the error is, it is very likely to persist.
*/
- pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
- num_copies, failrec->this_mirror, failed_mirror);
+ btrfs_debug(fs_info,
+ "Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
+ num_copies, failrec->this_mirror, failed_mirror);
return 0;
}
@@ -7886,8 +7900,9 @@ static int btrfs_check_dio_repairable(struct inode *inode,
failrec->this_mirror++;
if (failrec->this_mirror > num_copies) {
- pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
- num_copies, failrec->this_mirror, failed_mirror);
+ btrfs_debug(fs_info,
+ "Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
+ num_copies, failrec->this_mirror, failed_mirror);
return 0;
}
@@ -9055,7 +9070,7 @@ again:
0, 0, &cached_state, GFP_NOFS);
ret = btrfs_set_extent_delalloc(inode, page_start, end,
- &cached_state);
+ &cached_state, 0);
if (ret) {
unlock_extent_cached(io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
@@ -9377,8 +9392,9 @@ void btrfs_destroy_inode(struct inode *inode)
if (!ordered)
break;
else {
- btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
- ordered->file_offset, ordered->len);
+ btrfs_err(root->fs_info,
+ "found ordered extent %llu %llu on inode cleanup",
+ ordered->file_offset, ordered->len);
btrfs_remove_ordered_extent(inode, ordered);
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b182197f7091..18e1aa0f85f5 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1903,8 +1903,9 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
if (key.objectid == root->root_key.objectid) {
ret = -EPERM;
- btrfs_err(root->fs_info, "deleting default subvolume "
- "%llu is not allowed", key.objectid);
+ btrfs_err(root->fs_info,
+ "deleting default subvolume %llu is not allowed",
+ key.objectid);
goto out;
}
btrfs_release_path(path);
@@ -4097,8 +4098,8 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
if (IS_ERR_OR_NULL(di)) {
btrfs_free_path(path);
btrfs_end_transaction(trans, root);
- btrfs_err(new_root->fs_info, "Umm, you don't have the default dir"
- "item, this isn't going to work");
+ btrfs_err(new_root->fs_info,
+ "Umm, you don't have the default diritem, this isn't going to work");
ret = -ENOENT;
goto out;
}
@@ -5307,8 +5308,9 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
return -EFAULT;
if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
- btrfs_err(root->fs_info, "unable to set label with more than %d bytes",
- BTRFS_LABEL_SIZE - 1);
+ btrfs_err(root->fs_info,
+ "unable to set label with more than %d bytes",
+ BTRFS_LABEL_SIZE - 1);
return -EINVAL;
}
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 1adfbe7be6b8..48655da0f4ca 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -141,7 +141,7 @@ static int lzo_compress_pages(struct list_head *ws,
ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
&out_len, workspace->mem);
if (ret != LZO_E_OK) {
- printk(KERN_DEBUG "BTRFS: deflate in loop returned %d\n",
+ pr_debug("BTRFS: deflate in loop returned %d\n",
ret);
ret = -EIO;
goto out;
@@ -356,7 +356,7 @@ cont:
if (need_unmap)
kunmap(pages_in[page_in_index - 1]);
if (ret != LZO_E_OK) {
- printk(KERN_WARNING "BTRFS: decompress failed\n");
+ pr_warn("BTRFS: decompress failed\n");
ret = -EIO;
break;
}
@@ -402,7 +402,7 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
out_len = PAGE_SIZE;
ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
if (ret != LZO_E_OK) {
- printk(KERN_WARNING "BTRFS: decompress failed!\n");
+ pr_warn("BTRFS: decompress failed!\n");
ret = -EIO;
goto out;
}
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 3b78d38173b3..b2d1e95de7be 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -67,8 +67,8 @@ static void ordered_data_tree_panic(struct inode *inode, int errno,
u64 offset)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
- "%llu", offset);
+ btrfs_panic(fs_info, errno,
+ "Inconsistency in ordered tree at offset %llu", offset);
}
/*
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 147dc6ca5de1..438575ea8d25 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -24,12 +24,11 @@ static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk)
{
int num_stripes = btrfs_chunk_num_stripes(eb, chunk);
int i;
- printk(KERN_INFO "\t\tchunk length %llu owner %llu type %llu "
- "num_stripes %d\n",
+ pr_info("\t\tchunk length %llu owner %llu type %llu num_stripes %d\n",
btrfs_chunk_length(eb, chunk), btrfs_chunk_owner(eb, chunk),
btrfs_chunk_type(eb, chunk), num_stripes);
for (i = 0 ; i < num_stripes ; i++) {
- printk(KERN_INFO "\t\t\tstripe %d devid %llu offset %llu\n", i,
+ pr_info("\t\t\tstripe %d devid %llu offset %llu\n", i,
btrfs_stripe_devid_nr(eb, chunk, i),
btrfs_stripe_offset_nr(eb, chunk, i));
}
@@ -37,8 +36,7 @@ static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk)
static void print_dev_item(struct extent_buffer *eb,
struct btrfs_dev_item *dev_item)
{
- printk(KERN_INFO "\t\tdev item devid %llu "
- "total_bytes %llu bytes used %llu\n",
+ pr_info("\t\tdev item devid %llu total_bytes %llu bytes used %llu\n",
btrfs_device_id(eb, dev_item),
btrfs_device_total_bytes(eb, dev_item),
btrfs_device_bytes_used(eb, dev_item));
@@ -46,8 +44,7 @@ static void print_dev_item(struct extent_buffer *eb,
static void print_extent_data_ref(struct extent_buffer *eb,
struct btrfs_extent_data_ref *ref)
{
- printk(KERN_INFO "\t\textent data backref root %llu "
- "objectid %llu offset %llu count %u\n",
+ pr_info("\t\textent data backref root %llu objectid %llu offset %llu count %u\n",
btrfs_extent_data_ref_root(eb, ref),
btrfs_extent_data_ref_objectid(eb, ref),
btrfs_extent_data_ref_offset(eb, ref),
@@ -72,7 +69,7 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
struct btrfs_extent_item_v0 *ei0;
BUG_ON(item_size != sizeof(*ei0));
ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0);
- printk(KERN_INFO "\t\textent refs %u\n",
+ pr_info("\t\textent refs %u\n",
btrfs_extent_refs_v0(eb, ei0));
return;
#else
@@ -83,7 +80,7 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
flags = btrfs_extent_flags(eb, ei);
- printk(KERN_INFO "\t\textent refs %llu gen %llu flags %llu\n",
+ pr_info("\t\textent refs %llu gen %llu flags %llu\n",
btrfs_extent_refs(eb, ei), btrfs_extent_generation(eb, ei),
flags);
@@ -92,8 +89,7 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
struct btrfs_tree_block_info *info;
info = (struct btrfs_tree_block_info *)(ei + 1);
btrfs_tree_block_key(eb, info, &key);
- printk(KERN_INFO "\t\ttree block key (%llu %u %llu) "
- "level %d\n",
+ pr_info("\t\ttree block key (%llu %u %llu) level %d\n",
btrfs_disk_key_objectid(&key), key.type,
btrfs_disk_key_offset(&key),
btrfs_tree_block_level(eb, info));
@@ -110,12 +106,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
offset = btrfs_extent_inline_ref_offset(eb, iref);
switch (type) {
case BTRFS_TREE_BLOCK_REF_KEY:
- printk(KERN_INFO "\t\ttree block backref "
- "root %llu\n", offset);
+ pr_info("\t\ttree block backref root %llu\n", offset);
break;
case BTRFS_SHARED_BLOCK_REF_KEY:
- printk(KERN_INFO "\t\tshared block backref "
- "parent %llu\n", offset);
+ pr_info("\t\tshared block backref parent %llu\n", offset);
break;
case BTRFS_EXTENT_DATA_REF_KEY:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
@@ -123,8 +117,7 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
break;
case BTRFS_SHARED_DATA_REF_KEY:
sref = (struct btrfs_shared_data_ref *)(iref + 1);
- printk(KERN_INFO "\t\tshared data backref "
- "parent %llu count %u\n",
+ pr_info("\t\tshared data backref parent %llu count %u\n",
offset, btrfs_shared_data_ref_count(eb, sref));
break;
default:
@@ -141,8 +134,7 @@ static void print_extent_ref_v0(struct extent_buffer *eb, int slot)
struct btrfs_extent_ref_v0 *ref0;
ref0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_ref_v0);
- printk("\t\textent back ref root %llu gen %llu "
- "owner %llu num_refs %lu\n",
+ printk("\t\textent back ref root %llu gen %llu owner %llu num_refs %lu\n",
btrfs_ref_root_v0(eb, ref0),
btrfs_ref_generation_v0(eb, ref0),
btrfs_ref_objectid_v0(eb, ref0),
@@ -162,7 +154,7 @@ static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
__le64 subvol_id;
read_extent_buffer(l, &subvol_id, offset, sizeof(subvol_id));
- printk(KERN_INFO "\t\tsubvol_id %llu\n",
+ pr_info("\t\tsubvol_id %llu\n",
(unsigned long long)le64_to_cpu(subvol_id));
item_size -= sizeof(u64);
offset += sizeof(u64);
@@ -196,15 +188,13 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
item = btrfs_item_nr(i);
btrfs_item_key_to_cpu(l, &key, i);
type = key.type;
- printk(KERN_INFO "\titem %d key (%llu %u %llu) itemoff %d "
- "itemsize %d\n",
+ pr_info("\titem %d key (%llu %u %llu) itemoff %d itemsize %d\n",
i, key.objectid, type, key.offset,
btrfs_item_offset(l, item), btrfs_item_size(l, item));
switch (type) {
case BTRFS_INODE_ITEM_KEY:
ii = btrfs_item_ptr(l, i, struct btrfs_inode_item);
- printk(KERN_INFO "\t\tinode generation %llu size %llu "
- "mode %o\n",
+ pr_info("\t\tinode generation %llu size %llu mode %o\n",
btrfs_inode_generation(l, ii),
btrfs_inode_size(l, ii),
btrfs_inode_mode(l, ii));
@@ -212,13 +202,13 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
case BTRFS_DIR_ITEM_KEY:
di = btrfs_item_ptr(l, i, struct btrfs_dir_item);
btrfs_dir_item_key_to_cpu(l, di, &found_key);
- printk(KERN_INFO "\t\tdir oid %llu type %u\n",
+ pr_info("\t\tdir oid %llu type %u\n",
found_key.objectid,
btrfs_dir_type(l, di));
break;
case BTRFS_ROOT_ITEM_KEY:
ri = btrfs_item_ptr(l, i, struct btrfs_root_item);
- printk(KERN_INFO "\t\troot data bytenr %llu refs %u\n",
+ pr_info("\t\troot data bytenr %llu refs %u\n",
btrfs_disk_root_bytenr(l, ri),
btrfs_disk_root_refs(l, ri));
break;
@@ -227,10 +217,10 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
print_extent_item(l, i, type);
break;
case BTRFS_TREE_BLOCK_REF_KEY:
- printk(KERN_INFO "\t\ttree block backref\n");
+ pr_info("\t\ttree block backref\n");
break;
case BTRFS_SHARED_BLOCK_REF_KEY:
- printk(KERN_INFO "\t\tshared block backref\n");
+ pr_info("\t\tshared block backref\n");
break;
case BTRFS_EXTENT_DATA_REF_KEY:
dref = btrfs_item_ptr(l, i,
@@ -240,7 +230,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
case BTRFS_SHARED_DATA_REF_KEY:
sref = btrfs_item_ptr(l, i,
struct btrfs_shared_data_ref);
- printk(KERN_INFO "\t\tshared data backref count %u\n",
+ pr_info("\t\tshared data backref count %u\n",
btrfs_shared_data_ref_count(l, sref));
break;
case BTRFS_EXTENT_DATA_KEY:
@@ -248,17 +238,14 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
struct btrfs_file_extent_item);
if (btrfs_file_extent_type(l, fi) ==
BTRFS_FILE_EXTENT_INLINE) {
- printk(KERN_INFO "\t\tinline extent data "
- "size %u\n",
+ pr_info("\t\tinline extent data size %u\n",
btrfs_file_extent_inline_len(l, i, fi));
break;
}
- printk(KERN_INFO "\t\textent data disk bytenr %llu "
- "nr %llu\n",
+ pr_info("\t\textent data disk bytenr %llu nr %llu\n",
btrfs_file_extent_disk_bytenr(l, fi),
btrfs_file_extent_disk_num_bytes(l, fi));
- printk(KERN_INFO "\t\textent data offset %llu "
- "nr %llu ram %llu\n",
+ pr_info("\t\textent data offset %llu nr %llu ram %llu\n",
btrfs_file_extent_offset(l, fi),
btrfs_file_extent_num_bytes(l, fi),
btrfs_file_extent_ram_bytes(l, fi));
@@ -273,7 +260,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
case BTRFS_BLOCK_GROUP_ITEM_KEY:
bi = btrfs_item_ptr(l, i,
struct btrfs_block_group_item);
- printk(KERN_INFO "\t\tblock group used %llu\n",
+ pr_info("\t\tblock group used %llu\n",
btrfs_disk_block_group_used(l, bi));
break;
case BTRFS_CHUNK_ITEM_KEY:
@@ -287,38 +274,36 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
case BTRFS_DEV_EXTENT_KEY:
dev_extent = btrfs_item_ptr(l, i,
struct btrfs_dev_extent);
- printk(KERN_INFO "\t\tdev extent chunk_tree %llu\n"
- "\t\tchunk objectid %llu chunk offset %llu "
- "length %llu\n",
+ pr_info("\t\tdev extent chunk_tree %llu\n\t\tchunk objectid %llu chunk offset %llu length %llu\n",
btrfs_dev_extent_chunk_tree(l, dev_extent),
btrfs_dev_extent_chunk_objectid(l, dev_extent),
btrfs_dev_extent_chunk_offset(l, dev_extent),
btrfs_dev_extent_length(l, dev_extent));
break;
case BTRFS_PERSISTENT_ITEM_KEY:
- printk(KERN_INFO "\t\tpersistent item objectid %llu offset %llu\n",
+ pr_info("\t\tpersistent item objectid %llu offset %llu\n",
key.objectid, key.offset);
switch (key.objectid) {
case BTRFS_DEV_STATS_OBJECTID:
- printk(KERN_INFO "\t\tdevice stats\n");
+ pr_info("\t\tdevice stats\n");
break;
default:
- printk(KERN_INFO "\t\tunknown persistent item\n");
+ pr_info("\t\tunknown persistent item\n");
}
break;
case BTRFS_TEMPORARY_ITEM_KEY:
- printk(KERN_INFO "\t\ttemporary item objectid %llu offset %llu\n",
+ pr_info("\t\ttemporary item objectid %llu offset %llu\n",
key.objectid, key.offset);
switch (key.objectid) {
case BTRFS_BALANCE_OBJECTID:
- printk(KERN_INFO "\t\tbalance status\n");
+ pr_info("\t\tbalance status\n");
break;
default:
- printk(KERN_INFO "\t\tunknown temporary item\n");
+ pr_info("\t\tunknown temporary item\n");
}
break;
case BTRFS_DEV_REPLACE_KEY:
- printk(KERN_INFO "\t\tdev replace\n");
+ pr_info("\t\tdev replace\n");
break;
case BTRFS_UUID_KEY_SUBVOL:
case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
@@ -343,12 +328,13 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
btrfs_print_leaf(root, c);
return;
}
- btrfs_info(root->fs_info, "node %llu level %d total ptrs %d free spc %u",
- btrfs_header_bytenr(c), level, nr,
- (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
+ btrfs_info(root->fs_info,
+ "node %llu level %d total ptrs %d free spc %u",
+ btrfs_header_bytenr(c), level, nr,
+ (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
for (i = 0; i < nr; i++) {
btrfs_node_key_to_cpu(c, &key, i);
- printk(KERN_INFO "\tkey %d (%llu %u %llu) block %llu\n",
+ pr_info("\tkey %d (%llu %u %llu) block %llu\n",
i, key.objectid, key.type, key.offset,
btrfs_node_blockptr(c, i));
}
@@ -356,6 +342,13 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
struct extent_buffer *next = read_tree_block(root,
btrfs_node_blockptr(c, i),
btrfs_node_ptr_generation(c, i));
+ if (IS_ERR(next)) {
+ continue;
+ } else if (!extent_buffer_uptodate(next)) {
+ free_extent_buffer(next);
+ continue;
+ }
+
if (btrfs_is_leaf(next) &&
level != 1)
BUG();
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 8db2e29fdcf4..11f4fffe503e 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -309,7 +309,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
u64 flags = 0;
u64 rescan_progress = 0;
- if (!fs_info->quota_enabled)
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
return 0;
fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
@@ -360,8 +360,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
fs_info->generation) {
flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
btrfs_err(fs_info,
- "qgroup generation mismatch, "
- "marked as inconsistent");
+ "qgroup generation mismatch, marked as inconsistent");
}
fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
ptr);
@@ -463,13 +462,11 @@ next2:
}
out:
fs_info->qgroup_flags |= flags;
- if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
- fs_info->quota_enabled = 0;
- fs_info->pending_quota_state = 0;
- } else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
- ret >= 0) {
+ if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+ clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
+ ret >= 0)
ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
- }
btrfs_free_path(path);
if (ret < 0) {
@@ -847,7 +844,7 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
}
ret = 0;
out:
- root->fs_info->pending_quota_state = 0;
+ set_bit(BTRFS_FS_QUOTA_DISABLING, &root->fs_info->flags);
btrfs_free_path(path);
return ret;
}
@@ -868,7 +865,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (fs_info->quota_root) {
- fs_info->pending_quota_state = 1;
+ set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
goto out;
}
@@ -964,7 +961,7 @@ out_add_root:
}
spin_lock(&fs_info->qgroup_lock);
fs_info->quota_root = quota_root;
- fs_info->pending_quota_state = 1;
+ set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
spin_unlock(&fs_info->qgroup_lock);
out_free_path:
btrfs_free_path(path);
@@ -993,8 +990,8 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root)
goto out;
- fs_info->quota_enabled = 0;
- fs_info->pending_quota_state = 0;
+ clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ set_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags);
btrfs_qgroup_wait_for_completion(fs_info, false);
spin_lock(&fs_info->qgroup_lock);
quota_root = fs_info->quota_root;
@@ -1490,7 +1487,8 @@ int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_root *delayed_refs;
int ret;
- if (!fs_info->quota_enabled || bytenr == 0 || num_bytes == 0)
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
+ || bytenr == 0 || num_bytes == 0)
return 0;
if (WARN_ON(trans == NULL))
return -EINVAL;
@@ -1713,7 +1711,7 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
if (old_roots)
nr_old_roots = old_roots->nnodes;
- if (!fs_info->quota_enabled)
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
goto out_free;
BUG_ON(!fs_info->quota_root);
@@ -1833,10 +1831,14 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
if (!quota_root)
goto out;
- if (!fs_info->quota_enabled && fs_info->pending_quota_state)
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+ test_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
start_rescan_worker = 1;
- fs_info->quota_enabled = fs_info->pending_quota_state;
+ if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags))
+ clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
spin_lock(&fs_info->qgroup_lock);
while (!list_empty(&fs_info->dirty_qgroups)) {
@@ -1855,7 +1857,7 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
spin_lock(&fs_info->qgroup_lock);
}
- if (fs_info->quota_enabled)
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
else
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
@@ -1900,7 +1902,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
u64 nums;
mutex_lock(&fs_info->qgroup_ioctl_lock);
- if (!fs_info->quota_enabled)
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
goto out;
if (!quota_root) {
@@ -1991,8 +1993,9 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
if (ret) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- btrfs_info(fs_info, "unable to update quota limit for %llu",
- dstgroup->qgroupid);
+ btrfs_info(fs_info,
+ "unable to update quota limit for %llu",
+ dstgroup->qgroupid);
goto unlock;
}
}
@@ -2226,8 +2229,7 @@ void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
return;
btrfs_err(trans->fs_info,
- "qgroups not uptodate in trans handle %p: list is%s empty, "
- "seq is %#x.%x",
+ "qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x",
trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
(u32)(trans->delayed_ref_elem.seq >> 32),
(u32)trans->delayed_ref_elem.seq);
@@ -2255,10 +2257,11 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
&fs_info->qgroup_rescan_progress,
path, 1, 0);
- pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
- fs_info->qgroup_rescan_progress.objectid,
- fs_info->qgroup_rescan_progress.type,
- fs_info->qgroup_rescan_progress.offset, ret);
+ btrfs_debug(fs_info,
+ "current progress key (%llu %u %llu), search_slot ret %d",
+ fs_info->qgroup_rescan_progress.objectid,
+ fs_info->qgroup_rescan_progress.type,
+ fs_info->qgroup_rescan_progress.offset, ret);
if (ret) {
/*
@@ -2347,7 +2350,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
err = PTR_ERR(trans);
break;
}
- if (!fs_info->quota_enabled) {
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
err = -EINTR;
} else {
err = qgroup_rescan_leaf(fs_info, path, trans);
@@ -2388,7 +2391,7 @@ out:
ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
if (ret < 0) {
err = ret;
- btrfs_err(fs_info, "fail to update qgroup status: %d\n", err);
+ btrfs_err(fs_info, "fail to update qgroup status: %d", err);
}
btrfs_end_transaction(trans, fs_info->quota_root);
@@ -2578,8 +2581,8 @@ int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
struct ulist_iterator uiter;
int ret;
- if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) ||
- len == 0)
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+ !is_fstree(root->objectid) || len == 0)
return 0;
changeset.bytes_changed = 0;
@@ -2676,8 +2679,8 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
{
int ret;
- if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) ||
- num_bytes == 0)
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+ !is_fstree(root->objectid) || num_bytes == 0)
return 0;
BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
@@ -2692,7 +2695,8 @@ void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
{
int reserved;
- if (!root->fs_info->quota_enabled || !is_fstree(root->objectid))
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+ !is_fstree(root->objectid))
return;
reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
@@ -2703,7 +2707,8 @@ void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
{
- if (!root->fs_info->quota_enabled || !is_fstree(root->objectid))
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+ !is_fstree(root->objectid))
return;
BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index cd8d302a1f61..d016d4a79864 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2143,7 +2143,10 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
rbio->faila = find_logical_bio_stripe(rbio, bio);
if (rbio->faila == -1) {
- BUG();
+ btrfs_warn(root->fs_info,
+ "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
+ __func__, (u64)bio->bi_iter.bi_sector << 9,
+ (u64)bio->bi_iter.bi_size, bbio->map_type);
if (generic_io)
btrfs_put_bbio(bbio);
kfree(rbio);
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 8428db7cd88f..75bab76739be 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -820,7 +820,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
spin_lock(&fs_info->reada_lock);
list_for_each_entry(device, &fs_devices->devices, dev_list) {
- printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
+ btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
atomic_read(&device->reada_in_flight));
index = 0;
while (1) {
@@ -829,17 +829,17 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
(void **)&zone, index, 1);
if (ret == 0)
break;
- printk(KERN_DEBUG " zone %llu-%llu elems %llu locked "
- "%d devs", zone->start, zone->end, zone->elems,
- zone->locked);
+ pr_debug(" zone %llu-%llu elems %llu locked %d devs",
+ zone->start, zone->end, zone->elems,
+ zone->locked);
for (j = 0; j < zone->ndevs; ++j) {
- printk(KERN_CONT " %lld",
+ pr_cont(" %lld",
zone->devs[j]->devid);
}
if (device->reada_curr_zone == zone)
- printk(KERN_CONT " curr off %llu",
+ pr_cont(" curr off %llu",
device->reada_next - zone->start);
- printk(KERN_CONT "\n");
+ pr_cont("\n");
index = (zone->end >> PAGE_SHIFT) + 1;
}
cnt = 0;
@@ -851,21 +851,20 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
(void **)&re, index, 1);
if (ret == 0)
break;
- printk(KERN_DEBUG
- " re: logical %llu size %u empty %d scheduled %d",
+ pr_debug(" re: logical %llu size %u empty %d scheduled %d",
re->logical, fs_info->tree_root->nodesize,
list_empty(&re->extctl), re->scheduled);
for (i = 0; i < re->nzones; ++i) {
- printk(KERN_CONT " zone %llu-%llu devs",
+ pr_cont(" zone %llu-%llu devs",
re->zones[i]->start,
re->zones[i]->end);
for (j = 0; j < re->zones[i]->ndevs; ++j) {
- printk(KERN_CONT " %lld",
+ pr_cont(" %lld",
re->zones[i]->devs[j]->devid);
}
}
- printk(KERN_CONT "\n");
+ pr_cont("\n");
index = (re->logical >> PAGE_SHIFT) + 1;
if (++cnt > 15)
break;
@@ -885,20 +884,19 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
index = (re->logical >> PAGE_SHIFT) + 1;
continue;
}
- printk(KERN_DEBUG
- "re: logical %llu size %u list empty %d scheduled %d",
+ pr_debug("re: logical %llu size %u list empty %d scheduled %d",
re->logical, fs_info->tree_root->nodesize,
list_empty(&re->extctl), re->scheduled);
for (i = 0; i < re->nzones; ++i) {
- printk(KERN_CONT " zone %llu-%llu devs",
+ pr_cont(" zone %llu-%llu devs",
re->zones[i]->start,
re->zones[i]->end);
for (j = 0; j < re->zones[i]->ndevs; ++j) {
- printk(KERN_CONT " %lld",
+ pr_cont(" %lld",
re->zones[i]->devs[j]->devid);
}
}
- printk(KERN_CONT "\n");
+ pr_cont("\n");
index = (re->logical >> PAGE_SHIFT) + 1;
}
spin_unlock(&fs_info->reada_lock);
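
dump_devs() keeps its multi-part output lines but drops raw printk(): btrfs_debug() is used where an fs_info is at hand so the message is tagged with the filesystem it belongs to, while pr_debug()/pr_cont() replace printk(KERN_DEBUG)/printk(KERN_CONT) for the pieces that continue a line. A short sketch of how the three fit together (values are placeholders):

	btrfs_debug(fs_info, "dev %lld has %d in flight", devid, in_flight);
	pr_debug(" zone %llu-%llu elems %llu", start, end, elems);
	pr_cont(" %lld", devid);	/* appended to the pr_debug() line */
	pr_cont("\n");			/* terminates the continued line */
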
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index c0c13dc6fe12..0ec8ffa37ab0 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -337,8 +337,9 @@ static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
rb_node);
if (bnode->root)
fs_info = bnode->root->fs_info;
- btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
- "found at offset %llu", bytenr);
+ btrfs_panic(fs_info, errno,
+ "Inconsistency in backref cache found at offset %llu",
+ bytenr);
}
/*
@@ -923,9 +924,16 @@ again:
path2->slots[level]--;
eb = path2->nodes[level];
- WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
- cur->bytenr);
-
+ if (btrfs_node_blockptr(eb, path2->slots[level]) !=
+ cur->bytenr) {
+ btrfs_err(root->fs_info,
+ "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
+ cur->bytenr, level - 1, root->objectid,
+ node_key->objectid, node_key->type,
+ node_key->offset);
+ err = -ENOENT;
+ goto out;
+ }
lower = cur;
need_check = true;
for (; level < BTRFS_MAX_LEVEL; level++) {
@@ -1296,9 +1304,9 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node) {
- btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
- "for start=%llu while inserting into relocation "
- "tree", node->bytenr);
+ btrfs_panic(root->fs_info, -EEXIST,
+ "Duplicate root found for start=%llu while inserting into relocation tree",
+ node->bytenr);
kfree(node);
return -EEXIST;
}
@@ -2350,6 +2358,10 @@ void free_reloc_roots(struct list_head *list)
while (!list_empty(list)) {
reloc_root = list_entry(list->next, struct btrfs_root,
root_list);
+ free_extent_buffer(reloc_root->node);
+ free_extent_buffer(reloc_root->commit_root);
+ reloc_root->node = NULL;
+ reloc_root->commit_root = NULL;
__del_reloc_root(reloc_root);
}
}
@@ -2686,11 +2698,15 @@ static int do_relocation(struct btrfs_trans_handle *trans,
if (!upper->eb) {
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
- if (ret < 0) {
- err = ret;
+ if (ret) {
+ if (ret < 0)
+ err = ret;
+ else
+ err = -ENOENT;
+
+ btrfs_release_path(path);
break;
}
- BUG_ON(ret > 0);
if (!upper->eb) {
upper->eb = path->nodes[upper->level];
@@ -3203,7 +3219,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
nr++;
}
- btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
+ btrfs_set_extent_delalloc(inode, page_start, page_end, NULL, 0);
set_page_dirty(page);
unlock_extent(&BTRFS_I(inode)->io_tree,
@@ -3952,7 +3968,7 @@ static int qgroup_fix_relocated_data_extents(struct btrfs_trans_handle *trans,
struct btrfs_key key;
int ret = 0;
- if (!fs_info->quota_enabled)
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
return 0;
/*
@@ -4365,8 +4381,9 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
goto out;
}
- btrfs_info(extent_root->fs_info, "relocating block group %llu flags %llu",
- rc->block_group->key.objectid, rc->block_group->flags);
+ btrfs_info(extent_root->fs_info,
+ "relocating block group %llu flags %llu",
+ rc->block_group->key.objectid, rc->block_group->flags);
btrfs_wait_block_group_reservations(rc->block_group);
btrfs_wait_nocow_writers(rc->block_group);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 091296062456..edae751e870c 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -46,12 +46,7 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
!= btrfs_root_generation_v2(item)) {
if (btrfs_root_generation_v2(item) != 0) {
btrfs_warn(eb->fs_info,
- "mismatching "
- "generation and generation_v2 "
- "found in root item. This root "
- "was probably mounted with an "
- "older kernel. Resetting all "
- "new fields.");
+ "mismatching generation and generation_v2 found in root item. This root was probably mounted with an older kernel. Resetting all new fields.");
}
need_reset = 1;
}
@@ -156,8 +151,9 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
if (ret != 0) {
btrfs_print_leaf(root, path->nodes[0]);
- btrfs_crit(root->fs_info, "unable to update root key %llu %u %llu",
- key->objectid, key->type, key->offset);
+ btrfs_crit(root->fs_info,
+ "unable to update root key %llu %u %llu",
+ key->objectid, key->type, key->offset);
BUG_ON(1);
}
@@ -302,8 +298,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
btrfs_handle_fs_error(tree_root->fs_info, err,
- "Failed to start trans to delete "
- "orphan item");
+ "Failed to start trans to delete orphan item");
break;
}
err = btrfs_del_orphan_item(trans, tree_root,
@@ -311,8 +306,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
btrfs_end_transaction(trans, tree_root);
if (err) {
btrfs_handle_fs_error(tree_root->fs_info, err,
- "Failed to delete root orphan "
- "item");
+ "Failed to delete root orphan item");
break;
}
continue;
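
root-tree.c, like most of the files below, re-joins user-visible format strings that had been wrapped across source lines; checkpatch exempts such strings from the line-length limit precisely so that a reported message can be found with a single grep. The before/after shape, using one of the messages above:

	/* before: grepping for the full message finds nothing */
	btrfs_handle_fs_error(tree_root->fs_info, err,
			      "Failed to delete root orphan "
			      "item");

	/* after: one literal, even though the line is long */
	btrfs_handle_fs_error(tree_root->fs_info, err,
			      "Failed to delete root orphan item");
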
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 1d195d2b32c6..fffb9ab8526e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -575,23 +575,25 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
* hold all of the paths here
*/
for (i = 0; i < ipath->fspath->elem_cnt; ++i)
- btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
- "%s, sector %llu, root %llu, inode %llu, offset %llu, "
- "length %llu, links %u (path: %s)", swarn->errstr,
- swarn->logical, rcu_str_deref(swarn->dev->name),
- (unsigned long long)swarn->sector, root, inum, offset,
- min(isize - offset, (u64)PAGE_SIZE), nlink,
- (char *)(unsigned long)ipath->fspath->val[i]);
+ btrfs_warn_in_rcu(fs_info,
+ "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
+ swarn->errstr, swarn->logical,
+ rcu_str_deref(swarn->dev->name),
+ (unsigned long long)swarn->sector,
+ root, inum, offset,
+ min(isize - offset, (u64)PAGE_SIZE), nlink,
+ (char *)(unsigned long)ipath->fspath->val[i]);
free_ipath(ipath);
return 0;
err:
- btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
- "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
- "resolving failed with ret=%d", swarn->errstr,
- swarn->logical, rcu_str_deref(swarn->dev->name),
- (unsigned long long)swarn->sector, root, inum, offset, ret);
+ btrfs_warn_in_rcu(fs_info,
+ "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
+ swarn->errstr, swarn->logical,
+ rcu_str_deref(swarn->dev->name),
+ (unsigned long long)swarn->sector,
+ root, inum, offset, ret);
free_ipath(ipath);
return 0;
@@ -645,9 +647,8 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
item_size, &ref_root,
&ref_level);
btrfs_warn_in_rcu(fs_info,
- "%s at logical %llu on dev %s, "
- "sector %llu: metadata %s (level %d) in tree "
- "%llu", errstr, swarn.logical,
+ "%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
+ errstr, swarn.logical,
rcu_str_deref(dev->name),
(unsigned long long)swarn.sector,
ref_level ? "node" : "leaf",
@@ -1574,8 +1575,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
if (!page_bad->dev->bdev) {
btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info,
- "scrub_repair_page_from_good_copy(bdev == NULL) "
- "is unexpected");
+ "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
return -EIO;
}
@@ -2961,7 +2961,8 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
(key.objectid < logic_start ||
key.objectid + bytes >
logic_start + map->stripe_len)) {
- btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
+ btrfs_err(fs_info,
+ "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
key.objectid, logic_start);
spin_lock(&sctx->stat_lock);
sctx->stat.uncorrectable_errors++;
@@ -3312,8 +3313,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
key.objectid + bytes >
logical + map->stripe_len)) {
btrfs_err(fs_info,
- "scrub: tree block %llu spanning "
- "stripes, ignored. logical=%llu",
+ "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
key.objectid, logical);
spin_lock(&sctx->stat_lock);
sctx->stat.uncorrectable_errors++;
@@ -3640,7 +3640,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
*/
ro_set = 0;
} else {
- btrfs_warn(fs_info, "failed setting block group ro, ret=%d\n",
+ btrfs_warn(fs_info,
+ "failed setting block group ro, ret=%d\n",
ret);
btrfs_put_block_group(cache);
break;
@@ -3861,8 +3862,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
/* not supported for data w/o checksums */
btrfs_err_rl(fs_info,
- "scrub: size assumption sectorsize != PAGE_SIZE "
- "(%d != %lu) fails",
+ "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
fs_info->chunk_root->sectorsize, PAGE_SIZE);
return -EINVAL;
}
@@ -3875,8 +3875,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
* would exhaust the array bounds of pagev member in
* struct scrub_block
*/
- btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
- "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
+ btrfs_err(fs_info,
+ "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
fs_info->chunk_root->nodesize,
SCRUB_MAX_PAGES_PER_BLOCK,
fs_info->chunk_root->sectorsize,
@@ -4202,10 +4202,10 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
ret = iterate_inodes_from_logical(logical, fs_info, path,
record_inode_for_nocow, nocow_ctx);
if (ret != 0 && ret != -ENOENT) {
- btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
- "phys %llu, len %llu, mir %u, ret %d",
- logical, physical_for_dev_replace, len, mirror_num,
- ret);
+ btrfs_warn(fs_info,
+ "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
+ logical, physical_for_dev_replace, len, mirror_num,
+ ret);
not_written = 1;
goto out;
}
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 1379e59277e2..01bc36cec26e 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -36,10 +36,6 @@
#include "transaction.h"
#include "compression.h"
-static int g_verbose = 0;
-
-#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)
-
/*
* A fs_path is a helper to dynamically build path names with unknown size.
* It reallocates the internal buffer on demand.
@@ -727,9 +723,10 @@ static int send_cmd(struct send_ctx *sctx)
static int send_rename(struct send_ctx *sctx,
struct fs_path *from, struct fs_path *to)
{
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
-verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);
+ btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
if (ret < 0)
@@ -751,9 +748,10 @@ out:
static int send_link(struct send_ctx *sctx,
struct fs_path *path, struct fs_path *lnk)
{
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
-verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);
+ btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
if (ret < 0)
@@ -774,9 +772,10 @@ out:
*/
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
-verbose_printk("btrfs: send_unlink %s\n", path->start);
+ btrfs_debug(fs_info, "send_unlink %s", path->start);
ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
if (ret < 0)
@@ -796,9 +795,10 @@ out:
*/
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
-verbose_printk("btrfs: send_rmdir %s\n", path->start);
+ btrfs_debug(fs_info, "send_rmdir %s", path->start);
ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
if (ret < 0)
@@ -1313,6 +1313,7 @@ static int find_extent_clone(struct send_ctx *sctx,
u64 ino_size,
struct clone_root **found)
{
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret;
int extent_type;
u64 logical;
@@ -1371,10 +1372,10 @@ static int find_extent_clone(struct send_ctx *sctx,
}
logical = disk_byte + btrfs_file_extent_offset(eb, fi);
- down_read(&sctx->send_root->fs_info->commit_root_sem);
- ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
+ down_read(&fs_info->commit_root_sem);
+ ret = extent_from_logical(fs_info, disk_byte, tmp_path,
&found_key, &flags);
- up_read(&sctx->send_root->fs_info->commit_root_sem);
+ up_read(&fs_info->commit_root_sem);
btrfs_release_path(tmp_path);
if (ret < 0)
@@ -1429,7 +1430,7 @@ static int find_extent_clone(struct send_ctx *sctx,
extent_item_pos = logical - found_key.objectid;
else
extent_item_pos = 0;
- ret = iterate_extent_inodes(sctx->send_root->fs_info,
+ ret = iterate_extent_inodes(fs_info,
found_key.objectid, extent_item_pos, 1,
__iterate_backrefs, backref_ctx);
@@ -1439,20 +1440,18 @@ static int find_extent_clone(struct send_ctx *sctx,
if (!backref_ctx->found_itself) {
/* found a bug in backref code? */
ret = -EIO;
- btrfs_err(sctx->send_root->fs_info, "did not find backref in "
- "send_root. inode=%llu, offset=%llu, "
- "disk_byte=%llu found extent=%llu",
- ino, data_offset, disk_byte, found_key.objectid);
+ btrfs_err(fs_info,
+ "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
+ ino, data_offset, disk_byte, found_key.objectid);
goto out;
}
-verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
- "ino=%llu, "
- "num_bytes=%llu, logical=%llu\n",
- data_offset, ino, num_bytes, logical);
+ btrfs_debug(fs_info,
+ "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
+ data_offset, ino, num_bytes, logical);
if (!backref_ctx->found)
- verbose_printk("btrfs: no clones found\n");
+ btrfs_debug(fs_info, "no clones found");
cur_clone_root = NULL;
for (i = 0; i < sctx->clone_roots_cnt; i++) {
@@ -2423,10 +2422,11 @@ out:
static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
{
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
-verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);
+ btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
p = fs_path_alloc();
if (!p)
@@ -2452,10 +2452,11 @@ out:
static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
{
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
-verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);
+ btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
p = fs_path_alloc();
if (!p)
@@ -2481,10 +2482,12 @@ out:
static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
{
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
-verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);
+ btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
+ ino, uid, gid);
p = fs_path_alloc();
if (!p)
@@ -2511,6 +2514,7 @@ out:
static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
{
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p = NULL;
struct btrfs_inode_item *ii;
@@ -2519,7 +2523,7 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
struct btrfs_key key;
int slot;
-verbose_printk("btrfs: send_utimes %llu\n", ino);
+ btrfs_debug(fs_info, "send_utimes %llu", ino);
p = fs_path_alloc();
if (!p)
@@ -2573,6 +2577,7 @@ out:
*/
static int send_create_inode(struct send_ctx *sctx, u64 ino)
{
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
int cmd;
@@ -2580,7 +2585,7 @@ static int send_create_inode(struct send_ctx *sctx, u64 ino)
u64 mode;
u64 rdev;
-verbose_printk("btrfs: send_create_inode %llu\n", ino);
+ btrfs_debug(fs_info, "send_create_inode %llu", ino);
p = fs_path_alloc();
if (!p)
@@ -3638,6 +3643,7 @@ out:
*/
static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
{
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct recorded_ref *cur;
struct recorded_ref *cur2;
@@ -3650,7 +3656,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
u64 last_dir_ino_rm = 0;
bool can_rename = true;
-verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
+ btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
/*
* This should never happen as the root dir always has the same ref
@@ -4398,12 +4404,8 @@ static int process_new_xattr(struct send_ctx *sctx)
static int process_deleted_xattr(struct send_ctx *sctx)
{
- int ret;
-
- ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
- sctx->cmp_key, __process_deleted_xattr, sctx);
-
- return ret;
+ return iterate_dir_item(sctx->parent_root, sctx->right_path,
+ sctx->cmp_key, __process_deleted_xattr, sctx);
}
struct find_xattr_ctx {
@@ -4664,6 +4666,7 @@ out:
*/
static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
{
+ struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
int ret = 0;
struct fs_path *p;
ssize_t num_read = 0;
@@ -4672,7 +4675,7 @@ static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
if (!p)
return -ENOMEM;
-verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
+ btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
num_read = fill_read_buf(sctx, offset, len);
if (num_read <= 0) {
@@ -4714,10 +4717,10 @@ static int send_clone(struct send_ctx *sctx,
struct fs_path *p;
u64 gen;
-verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
- "clone_inode=%llu, clone_offset=%llu\n", offset, len,
- clone_root->root->objectid, clone_root->ino,
- clone_root->offset);
+ btrfs_debug(sctx->send_root->fs_info,
+ "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
+ offset, len, clone_root->root->objectid, clone_root->ino,
+ clone_root->offset);
p = fs_path_alloc();
if (!p)
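
Throughout send.c the private g_verbose/verbose_printk mechanism is removed; each call site looks up the send root's fs_info and uses btrfs_debug(), so the messages obey the normal kernel debug controls and name the filesystem they came from. The recurring shape of the conversion, sketched with a hypothetical helper:

	static int send_example(struct send_ctx *sctx, u64 ino)
	{
		struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;

		/* was: verbose_printk("btrfs: send_example %llu\n", ino); */
		btrfs_debug(fs_info, "send_example %llu", ino);
		return 0;
	}
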
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 4071fe2bd098..74ed5aae6cea 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -151,12 +151,11 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
vaf.fmt = fmt;
vaf.va = &args;
- printk(KERN_CRIT
- "BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
+ pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
sb->s_id, function, line, errno, errstr, &vaf);
va_end(args);
} else {
- printk(KERN_CRIT "BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
+ pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
sb->s_id, function, line, errno, errstr);
}
#endif
@@ -462,9 +461,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
case Opt_datasum:
if (btrfs_test_opt(info, NODATASUM)) {
if (btrfs_test_opt(info, NODATACOW))
- btrfs_info(root->fs_info, "setting datasum, datacow enabled");
+ btrfs_info(root->fs_info,
+ "setting datasum, datacow enabled");
else
- btrfs_info(root->fs_info, "setting datasum");
+ btrfs_info(root->fs_info,
+ "setting datasum");
}
btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM);
@@ -476,7 +477,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
btrfs_info(root->fs_info,
"setting nodatacow, compression disabled");
} else {
- btrfs_info(root->fs_info, "setting nodatacow");
+ btrfs_info(root->fs_info,
+ "setting nodatacow");
}
}
btrfs_clear_opt(info->mount_opt, COMPRESS);
@@ -608,8 +610,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
info->alloc_start = memparse(num, NULL);
mutex_unlock(&info->chunk_mutex);
kfree(num);
- btrfs_info(root->fs_info, "allocations start at %llu",
- info->alloc_start);
+ btrfs_info(root->fs_info,
+ "allocations start at %llu",
+ info->alloc_start);
} else {
ret = -ENOMEM;
goto out;
@@ -762,8 +765,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
goto out;
} else if (intarg >= 0) {
info->check_integrity_print_mask = intarg;
- btrfs_info(root->fs_info, "check_integrity_print_mask 0x%x",
- info->check_integrity_print_mask);
+ btrfs_info(root->fs_info,
+ "check_integrity_print_mask 0x%x",
+ info->check_integrity_print_mask);
} else {
ret = -EINVAL;
goto out;
@@ -794,19 +798,22 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
intarg = 0;
ret = match_int(&args[0], &intarg);
if (ret < 0) {
- btrfs_err(root->fs_info, "invalid commit interval");
+ btrfs_err(root->fs_info,
+ "invalid commit interval");
ret = -EINVAL;
goto out;
}
if (intarg > 0) {
if (intarg > 300) {
- btrfs_warn(root->fs_info, "excessive commit interval %d",
- intarg);
+ btrfs_warn(root->fs_info,
+ "excessive commit interval %d",
+ intarg);
}
info->commit_interval = intarg;
} else {
- btrfs_info(root->fs_info, "using default commit interval %ds",
- BTRFS_DEFAULT_COMMIT_INTERVAL);
+ btrfs_info(root->fs_info,
+ "using default commit interval %ds",
+ BTRFS_DEFAULT_COMMIT_INTERVAL);
info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
}
break;
@@ -827,7 +834,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break;
#endif
case Opt_err:
- btrfs_info(root->fs_info, "unrecognized mount option '%s'", p);
+ btrfs_info(root->fs_info,
+ "unrecognized mount option '%s'", p);
ret = -EINVAL;
goto out;
default:
@@ -916,9 +924,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
}
break;
case Opt_subvolrootid:
- printk(KERN_WARNING
- "BTRFS: 'subvolrootid' mount option is deprecated and has "
- "no effect\n");
+ pr_warn("BTRFS: 'subvolrootid' mount option is deprecated and has no effect\n");
break;
case Opt_device:
device_name = match_strdup(&args[0]);
@@ -1142,7 +1148,7 @@ static int btrfs_fill_super(struct super_block *sb,
sb->s_iflags |= SB_I_CGROUPWB;
err = open_ctree(sb, fs_devices, (char *)data);
if (err) {
- printk(KERN_ERR "BTRFS: open_ctree failed\n");
+ btrfs_err(fs_info, "open_ctree failed");
return err;
}
@@ -1440,12 +1446,13 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
if (!IS_ERR(root)) {
struct super_block *s = root->d_sb;
+ struct btrfs_fs_info *fs_info = btrfs_sb(s);
struct inode *root_inode = d_inode(root);
u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
ret = 0;
if (!is_subvolume_inode(root_inode)) {
- pr_err("BTRFS: '%s' is not a valid subvolume\n",
+ btrfs_err(fs_info, "'%s' is not a valid subvolume",
subvol_name);
ret = -EINVAL;
}
@@ -1455,8 +1462,9 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
* subvolume which was passed by ID is renamed and
* another subvolume is renamed over the old location.
*/
- pr_err("BTRFS: subvol '%s' does not match subvolid %llu\n",
- subvol_name, subvol_objectid);
+ btrfs_err(fs_info,
+ "subvol '%s' does not match subvolid %llu",
+ subvol_name, subvol_objectid);
ret = -EINVAL;
}
if (ret) {
@@ -1830,13 +1838,15 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
btrfs_info(fs_info, "creating UUID tree");
ret = btrfs_create_uuid_tree(fs_info);
if (ret) {
- btrfs_warn(fs_info, "failed to create the UUID tree %d", ret);
+ btrfs_warn(fs_info,
+ "failed to create the UUID tree %d",
+ ret);
goto restore;
}
}
sb->s_flags &= ~MS_RDONLY;
- fs_info->open = 1;
+ set_bit(BTRFS_FS_OPEN, &fs_info->flags);
}
out:
wake_up_process(fs_info->transaction_kthread);
@@ -2346,7 +2356,7 @@ static void btrfs_interface_exit(void)
static void btrfs_print_mod_info(void)
{
- printk(KERN_INFO "Btrfs loaded, crc32c=%s"
+ pr_info("Btrfs loaded, crc32c=%s"
#ifdef CONFIG_BTRFS_DEBUG
", debug=on"
#endif
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index c6569905d3d1..1f157fba8940 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -77,7 +77,7 @@ static int can_modify_feature(struct btrfs_feature_attr *fa)
clear = BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR;
break;
default:
- printk(KERN_WARNING "btrfs: sysfs: unknown feature set %d\n",
+ pr_warn("btrfs: sysfs: unknown feature set %d\n",
fa->feature_set);
return 0;
}
@@ -430,7 +430,8 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->sectorsize);
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ fs_info->super_copy->sectorsize);
}
BTRFS_ATTR(sectorsize, btrfs_sectorsize_show);
@@ -440,7 +441,8 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->sectorsize);
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ fs_info->super_copy->sectorsize);
}
BTRFS_ATTR(clone_alignment, btrfs_clone_alignment_show);
@@ -836,9 +838,18 @@ static int btrfs_init_debugfs(void)
if (!btrfs_debugfs_root_dentry)
return -ENOMEM;
- debugfs_create_u64("test", S_IRUGO | S_IWUGO, btrfs_debugfs_root_dentry,
+ /*
+ * Example code, how to export data through debugfs.
+ *
+ * file: /sys/kernel/debug/btrfs/test
+ * contents of: btrfs_debugfs_test
+ */
+#ifdef CONFIG_BTRFS_DEBUG
+ debugfs_create_u64("test", S_IRUGO | S_IWUSR, btrfs_debugfs_root_dentry,
&btrfs_debugfs_test);
#endif
+
+#endif
return 0;
}
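
The sysfs hunk keeps the debugfs "test" entry only as example code, hides it behind CONFIG_BTRFS_DEBUG, and replaces the world-writable S_IWUGO mode with S_IWUSR. A hedged sketch of the resulting call (names as in the hunk; the variable is only a demonstration value):

	static u64 btrfs_debugfs_test;

	#ifdef CONFIG_BTRFS_DEBUG
		/* readable by all, writable only by root */
		debugfs_create_u64("test", S_IRUGO | S_IWUSR,
				   btrfs_debugfs_root_dentry, &btrfs_debugfs_test);
	#endif
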
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 9f72aeda9220..0bf46808ce8f 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -968,7 +968,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
/* [BTRFS_MAX_EXTENT_SIZE] */
BTRFS_I(inode)->outstanding_extents++;
ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1,
- NULL);
+ NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
@@ -984,7 +984,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
BTRFS_I(inode)->outstanding_extents++;
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
- NULL);
+ NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
@@ -1019,7 +1019,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
(BTRFS_MAX_EXTENT_SIZE >> 1)
+ sectorsize - 1,
- NULL);
+ NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
@@ -1042,7 +1042,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
(BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
- NULL);
+ NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
@@ -1060,7 +1060,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
BTRFS_I(inode)->outstanding_extents++;
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + sectorsize,
- BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
@@ -1097,7 +1097,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
BTRFS_I(inode)->outstanding_extents++;
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + sectorsize,
- BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 4407fef7c16c..ca7cb5e6d385 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -480,7 +480,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
*/
root->fs_info->tree_root = root;
root->fs_info->quota_root = root;
- root->fs_info->quota_enabled = 1;
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
/*
* Can't use bytenr 0, some things freak out
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c294313ea2c8..9517de0e668c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -65,8 +65,9 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
BUG_ON(!list_empty(&transaction->list));
WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
if (transaction->delayed_refs.pending_csums)
- printk(KERN_ERR "pending csums is %llu\n",
- transaction->delayed_refs.pending_csums);
+ btrfs_err(transaction->fs_info,
+ "pending csums is %llu",
+ transaction->delayed_refs.pending_csums);
while (!list_empty(&transaction->pending_chunks)) {
struct extent_map *em;
@@ -245,6 +246,7 @@ loop:
return -EROFS;
}
+ cur_trans->fs_info = fs_info;
atomic_set(&cur_trans->num_writers, 1);
extwriter_counter_init(cur_trans, type);
init_waitqueue_head(&cur_trans->writer_wait);
@@ -272,11 +274,9 @@ loop:
*/
smp_mb();
if (!list_empty(&fs_info->tree_mod_seq_list))
- WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
- "creating a fresh transaction\n");
+ WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
- WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
- "creating a fresh transaction\n");
+ WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
atomic64_set(&fs_info->tree_mod_seq, 0);
spin_lock_init(&cur_trans->delayed_refs.lock);
@@ -441,7 +441,7 @@ static void wait_current_trans(struct btrfs_root *root)
static int may_wait_transaction(struct btrfs_root *root, int type)
{
- if (root->fs_info->log_root_recovering)
+ if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
return 0;
if (type == TRANS_USERSPACE)
@@ -549,11 +549,8 @@ again:
}
} while (ret == -EBUSY);
- if (ret < 0) {
- /* We must get the transaction if we are JOIN_NOLOCK. */
- BUG_ON(type == TRANS_JOIN_NOLOCK);
+ if (ret < 0)
goto join_fail;
- }
cur_trans = root->fs_info->running_transaction;
@@ -993,7 +990,6 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
- struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
bool errors = false;
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
@@ -1025,17 +1021,17 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
if ((mark & EXTENT_DIRTY) &&
- test_and_clear_bit(BTRFS_INODE_BTREE_LOG1_ERR,
- &btree_ino->runtime_flags))
+ test_and_clear_bit(BTRFS_FS_LOG1_ERR,
+ &root->fs_info->flags))
errors = true;
if ((mark & EXTENT_NEW) &&
- test_and_clear_bit(BTRFS_INODE_BTREE_LOG2_ERR,
- &btree_ino->runtime_flags))
+ test_and_clear_bit(BTRFS_FS_LOG2_ERR,
+ &root->fs_info->flags))
errors = true;
} else {
- if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR,
- &btree_ino->runtime_flags))
+ if (test_and_clear_bit(BTRFS_FS_BTREE_ERR,
+ &root->fs_info->flags))
errors = true;
}
@@ -1300,11 +1296,11 @@ int btrfs_defrag_root(struct btrfs_root *root)
btrfs_btree_balance_dirty(info->tree_root);
cond_resched();
- if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
+ if (btrfs_fs_closing(info) || ret != -EAGAIN)
break;
- if (btrfs_defrag_cancelled(root->fs_info)) {
- pr_debug("BTRFS: defrag_root cancelled\n");
+ if (btrfs_defrag_cancelled(info)) {
+ btrfs_debug(info, "defrag_root cancelled");
ret = -EAGAIN;
break;
}
@@ -1335,7 +1331,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
* kick in anyway.
*/
mutex_lock(&fs_info->qgroup_ioctl_lock);
- if (!fs_info->quota_enabled) {
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
mutex_unlock(&fs_info->qgroup_ioctl_lock);
return 0;
}
@@ -1712,7 +1708,7 @@ static void update_super_roots(struct btrfs_root *root)
super->root_level = root_item->level;
if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
super->cache_generation = root_item->generation;
- if (root->fs_info->update_uuid_tree_gen)
+ if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &root->fs_info->flags))
super->uuid_tree_generation = root_item->generation;
}
@@ -1919,7 +1915,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
{
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_transaction *prev_trans = NULL;
- struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
int ret;
/* Stop the commit early if ->aborted is set */
@@ -2213,8 +2208,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
btrfs_update_commit_device_size(root->fs_info);
btrfs_update_commit_device_bytes_used(root, cur_trans);
- clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
- clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
+ clear_bit(BTRFS_FS_LOG1_ERR, &root->fs_info->flags);
+ clear_bit(BTRFS_FS_LOG2_ERR, &root->fs_info->flags);
btrfs_trans_release_chunk_metadata(trans);
@@ -2328,7 +2323,7 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
list_del_init(&root->root_list);
spin_unlock(&fs_info->trans_lock);
- pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);
+ btrfs_debug(fs_info, "cleaner removing %llu", root->objectid);
btrfs_kill_all_delayed_nodes(root);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index efb122643380..6cf0d37d4f76 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -82,6 +82,7 @@ struct btrfs_transaction {
spinlock_t dropped_roots_lock;
struct btrfs_delayed_ref_root delayed_refs;
int aborted;
+ struct btrfs_fs_info *fs_info;
};
#define __TRANS_FREEZABLE (1U << 0)
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 8a84ebd8e7cc..528cae123dc9 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -5579,7 +5579,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
if (!path)
return -ENOMEM;
- fs_info->log_root_recovering = 1;
+ set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
trans = btrfs_start_transaction(fs_info->tree_root, 0);
if (IS_ERR(trans)) {
@@ -5592,8 +5592,8 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
ret = walk_log_tree(trans, log_root_tree, &wc);
if (ret) {
- btrfs_handle_fs_error(fs_info, ret, "Failed to pin buffers while "
- "recovering log root tree.");
+ btrfs_handle_fs_error(fs_info, ret,
+ "Failed to pin buffers while recovering log root tree.");
goto error;
}
@@ -5639,8 +5639,8 @@ again:
free_extent_buffer(log->node);
free_extent_buffer(log->commit_root);
kfree(log);
- btrfs_handle_fs_error(fs_info, ret, "Couldn't read target root "
- "for tree log recovery.");
+ btrfs_handle_fs_error(fs_info, ret,
+ "Couldn't read target root for tree log recovery.");
goto error;
}
@@ -5689,7 +5689,7 @@ again:
free_extent_buffer(log_root_tree->node);
log_root_tree->log_root = NULL;
- fs_info->log_root_recovering = 0;
+ clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
kfree(log_root_tree);
return 0;
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 778282944530..7fc89e4adb41 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -69,8 +69,9 @@ static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, u8 *uuid,
ret = -ENOENT;
if (!IS_ALIGNED(item_size, sizeof(u64))) {
- btrfs_warn(uuid_root->fs_info, "uuid item with illegal size %lu!",
- (unsigned long)item_size);
+ btrfs_warn(uuid_root->fs_info,
+ "uuid item with illegal size %lu!",
+ (unsigned long)item_size);
goto out;
}
while (item_size) {
@@ -137,10 +138,10 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
offset = btrfs_item_ptr_offset(eb, slot);
offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le);
} else if (ret < 0) {
- btrfs_warn(uuid_root->fs_info, "insert uuid item failed %d "
- "(0x%016llx, 0x%016llx) type %u!",
- ret, (unsigned long long)key.objectid,
- (unsigned long long)key.offset, type);
+ btrfs_warn(uuid_root->fs_info,
+ "insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!",
+ ret, (unsigned long long)key.objectid,
+ (unsigned long long)key.offset, type);
goto out;
}
@@ -184,8 +185,8 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
if (ret < 0) {
- btrfs_warn(uuid_root->fs_info, "error %d while searching for uuid item!",
- ret);
+ btrfs_warn(uuid_root->fs_info,
+ "error %d while searching for uuid item!", ret);
goto out;
}
if (ret > 0) {
@@ -198,8 +199,9 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
offset = btrfs_item_ptr_offset(eb, slot);
item_size = btrfs_item_size_nr(eb, slot);
if (!IS_ALIGNED(item_size, sizeof(u64))) {
- btrfs_warn(uuid_root->fs_info, "uuid item with illegal size %lu!",
- (unsigned long)item_size);
+ btrfs_warn(uuid_root->fs_info,
+ "uuid item with illegal size %lu!",
+ (unsigned long)item_size);
ret = -ENOENT;
goto out;
}
@@ -299,8 +301,9 @@ again_search_slot:
offset = btrfs_item_ptr_offset(leaf, slot);
item_size = btrfs_item_size_nr(leaf, slot);
if (!IS_ALIGNED(item_size, sizeof(u64))) {
- btrfs_warn(fs_info, "uuid item with illegal size %lu!",
- (unsigned long)item_size);
+ btrfs_warn(fs_info,
+ "uuid item with illegal size %lu!",
+ (unsigned long)item_size);
goto skip;
}
while (item_size) {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 035efce603a9..71a60cc01451 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -859,7 +859,7 @@ static void btrfs_close_bdev(struct btrfs_device *device)
blkdev_put(device->bdev, device->mode);
}
-static void btrfs_close_one_device(struct btrfs_device *device)
+static void btrfs_prepare_close_one_device(struct btrfs_device *device)
{
struct btrfs_fs_devices *fs_devices = device->fs_devices;
struct btrfs_device *new_device;
@@ -877,8 +877,6 @@ static void btrfs_close_one_device(struct btrfs_device *device)
if (device->missing)
fs_devices->missing_devices--;
- btrfs_close_bdev(device);
-
new_device = btrfs_alloc_device(NULL, &device->devid,
device->uuid);
BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
@@ -892,23 +890,39 @@ static void btrfs_close_one_device(struct btrfs_device *device)
list_replace_rcu(&device->dev_list, &new_device->dev_list);
new_device->fs_devices = device->fs_devices;
-
- call_rcu(&device->rcu, free_device);
}
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device, *tmp;
+ struct list_head pending_put;
+
+ INIT_LIST_HEAD(&pending_put);
if (--fs_devices->opened > 0)
return 0;
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
- btrfs_close_one_device(device);
+ btrfs_prepare_close_one_device(device);
+ list_add(&device->dev_list, &pending_put);
}
mutex_unlock(&fs_devices->device_list_mutex);
+ /*
+ * btrfs_show_devname() is using the device_list_mutex,
+ * sometimes call to blkdev_put() leads vfs calling
+ * into this func. So do put outside of device_list_mutex,
+ * as of now.
+ */
+ while (!list_empty(&pending_put)) {
+ device = list_first_entry(&pending_put,
+ struct btrfs_device, dev_list);
+ list_del(&device->dev_list);
+ btrfs_close_bdev(device);
+ call_rcu(&device->rcu, free_device);
+ }
+
WARN_ON(fs_devices->open_devices);
WARN_ON(fs_devices->rw_devices);
fs_devices->opened = 0;
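
__btrfs_close_devices() now tears devices down in two passes: while device_list_mutex is held each device is only detached (btrfs_prepare_close_one_device()) and queued on a local pending_put list; the blkdev_put() and RCU free happen after the mutex is dropped, because blkdev_put() can call back into btrfs_show_devname(), which takes the same mutex. Reduced to a sketch that omits the per-device bookkeeping:

	LIST_HEAD(pending_put);

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_prepare_close_one_device(device);	/* detach, no blkdev_put() yet */
		list_add(&device->dev_list, &pending_put);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	while (!list_empty(&pending_put)) {
		device = list_first_entry(&pending_put,
					  struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_close_bdev(device);		/* safe: mutex no longer held */
		call_rcu(&device->rcu, free_device);
	}
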
@@ -1140,12 +1154,12 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
ret = device_list_add(path, disk_super, devid, fs_devices_ret);
if (ret > 0) {
if (disk_super->label[0]) {
- printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
+ pr_info("BTRFS: device label %s ", disk_super->label);
} else {
- printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
+ pr_info("BTRFS: device fsid %pU ", disk_super->fsid);
}
- printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
+ pr_cont("devid %llu transid %llu %s\n", devid, transid, path);
ret = 0;
}
if (!ret && fs_devices_ret)
@@ -1846,7 +1860,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
u64 num_devices;
int ret = 0;
bool clear_super = false;
- char *dev_name = NULL;
mutex_lock(&uuid_mutex);
@@ -1882,11 +1895,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
list_del_init(&device->dev_alloc_list);
device->fs_devices->rw_devices--;
unlock_chunks(root);
- dev_name = kstrdup(device->name->str, GFP_KERNEL);
- if (!dev_name) {
- ret = -ENOMEM;
- goto error_undo;
- }
clear_super = true;
}
@@ -1936,14 +1944,21 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
}
- btrfs_close_bdev(device);
-
- call_rcu(&device->rcu, free_device);
-
num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ /*
+ * at this point, the device is zero sized and detached from
+ * the devices list. All that's left is to zero out the old
+ * supers and free the device.
+ */
+ if (device->writeable)
+ btrfs_scratch_superblocks(device->bdev, device->name->str);
+
+ btrfs_close_bdev(device);
+ call_rcu(&device->rcu, free_device);
+
if (cur_devices->open_devices == 0) {
struct btrfs_fs_devices *fs_devices;
fs_devices = root->fs_info->fs_devices;
@@ -1962,24 +1977,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
root->fs_info->num_tolerated_disk_barrier_failures =
btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
- /*
- * at this point, the device is zero sized. We want to
- * remove it from the devices list and zero out the old super
- */
- if (clear_super) {
- struct block_device *bdev;
-
- bdev = blkdev_get_by_path(dev_name, FMODE_READ | FMODE_EXCL,
- root->fs_info->bdev_holder);
- if (!IS_ERR(bdev)) {
- btrfs_scratch_superblocks(bdev, dev_name);
- blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
- }
- }
-
out:
- kfree(dev_name);
-
mutex_unlock(&uuid_mutex);
return ret;
@@ -2494,9 +2492,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
ret = btrfs_relocate_sys_chunks(root);
if (ret < 0)
btrfs_handle_fs_error(root->fs_info, ret,
- "Failed to relocate sys chunks after "
- "device initialization. This can be fixed "
- "using the \"btrfs balance\" command.");
+ "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
trans = btrfs_attach_transaction(root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) == -ENOENT)
@@ -2555,7 +2551,8 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
devices = &fs_info->fs_devices->devices;
list_for_each_entry(device, devices, dev_list) {
if (device->bdev == bdev) {
- btrfs_err(fs_info, "target device is in the filesystem!");
+ btrfs_err(fs_info,
+ "target device is in the filesystem!");
ret = -EEXIST;
goto error;
}
@@ -2564,7 +2561,8 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
if (i_size_read(bdev->bd_inode) <
btrfs_device_get_total_bytes(srcdev)) {
- btrfs_err(fs_info, "target device is smaller than source device!");
+ btrfs_err(fs_info,
+ "target device is smaller than source device!");
ret = -EINVAL;
goto error;
}
@@ -3698,7 +3696,7 @@ error:
btrfs_free_path(path);
if (enospc_errors) {
btrfs_info(fs_info, "%d enospc errors during balance",
- enospc_errors);
+ enospc_errors);
if (!ret)
ret = -ENOSPC;
}
@@ -3792,8 +3790,8 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
!(bctl->flags & BTRFS_BALANCE_METADATA) ||
memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
- btrfs_err(fs_info, "with mixed groups data and "
- "metadata balance options must be the same");
+ btrfs_err(fs_info,
+ "with mixed groups data and metadata balance options must be the same");
ret = -EINVAL;
goto out;
}
@@ -3815,23 +3813,23 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
BTRFS_BLOCK_GROUP_RAID6);
if (validate_convert_profile(&bctl->data, allowed)) {
- btrfs_err(fs_info, "unable to start balance with target "
- "data profile %llu",
- bctl->data.target);
+ btrfs_err(fs_info,
+ "unable to start balance with target data profile %llu",
+ bctl->data.target);
ret = -EINVAL;
goto out;
}
if (validate_convert_profile(&bctl->meta, allowed)) {
btrfs_err(fs_info,
- "unable to start balance with target metadata profile %llu",
- bctl->meta.target);
+ "unable to start balance with target metadata profile %llu",
+ bctl->meta.target);
ret = -EINVAL;
goto out;
}
if (validate_convert_profile(&bctl->sys, allowed)) {
btrfs_err(fs_info,
- "unable to start balance with target system profile %llu",
- bctl->sys.target);
+ "unable to start balance with target system profile %llu",
+ bctl->sys.target);
ret = -EINVAL;
goto out;
}
@@ -3851,10 +3849,11 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
(fs_info->avail_metadata_alloc_bits & allowed) &&
!(bctl->meta.target & allowed))) {
if (bctl->flags & BTRFS_BALANCE_FORCE) {
- btrfs_info(fs_info, "force reducing metadata integrity");
+ btrfs_info(fs_info,
+ "force reducing metadata integrity");
} else {
- btrfs_err(fs_info, "balance will reduce metadata "
- "integrity, use force if you want this");
+ btrfs_err(fs_info,
+ "balance will reduce metadata integrity, use force if you want this");
ret = -EINVAL;
goto out;
}
@@ -3864,8 +3863,8 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
btrfs_warn(fs_info,
- "metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
- bctl->meta.target, bctl->data.target);
+ "metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
+ bctl->meta.target, bctl->data.target);
}
if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
@@ -4221,7 +4220,7 @@ out:
if (ret)
btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
else
- fs_info->update_uuid_tree_gen = 1;
+ set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
up(&fs_info->uuid_tree_rescan_sem);
return 0;
}
@@ -4913,15 +4912,16 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
read_unlock(&em_tree->lock);
if (!em) {
- btrfs_crit(extent_root->fs_info, "unable to find logical "
- "%Lu len %Lu", chunk_offset, chunk_size);
+ btrfs_crit(extent_root->fs_info,
+ "unable to find logical %Lu len %Lu",
+ chunk_offset, chunk_size);
return -EINVAL;
}
if (em->start != chunk_offset || em->len != chunk_size) {
- btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
- " %Lu-%Lu, found %Lu-%Lu", chunk_offset,
- chunk_size, em->start, em->len);
+ btrfs_crit(extent_root->fs_info,
+ "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
+ chunk_offset, chunk_size, em->start, em->len);
free_extent_map(em);
return -EINVAL;
}
@@ -5154,9 +5154,9 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
}
if (em->start > logical || em->start + em->len < logical) {
- btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
- "%Lu-%Lu", logical, logical+len, em->start,
- em->start + em->len);
+ btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got %Lu-%Lu",
+ logical, logical+len, em->start,
+ em->start + em->len);
free_extent_map(em);
return 1;
}
@@ -5370,9 +5370,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
}
if (em->start > logical || em->start + em->len < logical) {
- btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
- "found %Lu-%Lu", logical, em->start,
- em->start + em->len);
+ btrfs_crit(fs_info,
+ "found a bad mapping, wanted %Lu, found %Lu-%Lu",
+ logical, em->start, em->start + em->len);
free_extent_map(em);
return -EINVAL;
}
@@ -5390,9 +5390,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
stripe_offset = stripe_nr * stripe_len;
if (offset < stripe_offset) {
- btrfs_crit(fs_info, "stripe math has gone wrong, "
- "stripe_offset=%llu, offset=%llu, start=%llu, "
- "logical=%llu, stripe_len=%llu",
+ btrfs_crit(fs_info,
+ "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
stripe_offset, offset, em->start, logical,
stripe_len);
free_extent_map(em);
@@ -5642,8 +5641,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
mirror_num = stripe_index + 1;
}
if (stripe_index >= map->num_stripes) {
- btrfs_crit(fs_info, "stripe index math went horribly wrong, "
- "got stripe_index=%u, num_stripes=%u",
+ btrfs_crit(fs_info,
+ "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
stripe_index, map->num_stripes);
ret = -EINVAL;
goto out;
@@ -5907,10 +5906,11 @@ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
mirror_num, need_raid_map);
}
-int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
+int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
u64 chunk_start, u64 physical, u64 devid,
u64 **logical, int *naddrs, int *stripe_len)
{
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct extent_map_tree *em_tree = &map_tree->map_tree;
struct extent_map *em;
struct map_lookup *map;
@@ -5926,13 +5926,13 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
read_unlock(&em_tree->lock);
if (!em) {
- printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
- chunk_start);
+ btrfs_err(fs_info, "couldn't find em for chunk %Lu",
+ chunk_start);
return -EIO;
}
if (em->start != chunk_start) {
- printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
+ btrfs_err(fs_info, "bad chunk start, em=%Lu, wanted=%Lu",
em->start, chunk_start);
free_extent_map(em);
return -EIO;
@@ -6137,10 +6137,12 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
rcu_read_lock();
name = rcu_dereference(dev->name);
- pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu "
- "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_opf,
- (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
- name->str, dev->devid, bio->bi_iter.bi_size);
+ btrfs_debug(fs_info,
+ "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
+ bio_op(bio), bio->bi_opf,
+ (u64)bio->bi_iter.bi_sector,
+ (u_long)dev->bdev->bd_dev, name->str, dev->devid,
+ bio->bi_iter.bi_size);
rcu_read_unlock();
}
#endif
@@ -6215,8 +6217,9 @@ int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
}
if (map_length < length) {
- btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
- logical, length, map_length);
+ btrfs_crit(root->fs_info,
+ "mapping failed logical %llu bio len %llu len %llu",
+ logical, length, map_length);
BUG();
}
@@ -6483,8 +6486,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
free_extent_map(em);
return -EIO;
}
- btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
- devid, uuid);
+ btrfs_warn(root->fs_info,
+ "devid %llu uuid %pU is missing",
+ devid, uuid);
}
map->stripes[i].dev->in_fs_metadata = 1;
}
@@ -6661,7 +6665,8 @@ static int read_one_dev(struct btrfs_root *root,
int btrfs_read_sys_array(struct btrfs_root *root)
{
- struct btrfs_super_block *super_copy = root->fs_info->super_copy;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_super_block *super_copy = fs_info->super_copy;
struct extent_buffer *sb;
struct btrfs_disk_key *disk_key;
struct btrfs_chunk *chunk;
@@ -6732,8 +6737,8 @@ int btrfs_read_sys_array(struct btrfs_root *root)
num_stripes = btrfs_chunk_num_stripes(sb, chunk);
if (!num_stripes) {
- printk(KERN_ERR
- "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
+ btrfs_err(fs_info,
+ "invalid number of stripes %u in sys_array at offset %u",
num_stripes, cur_offset);
ret = -EIO;
break;
@@ -6741,7 +6746,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
type = btrfs_chunk_type(sb, chunk);
if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
- btrfs_err(root->fs_info,
+ btrfs_err(fs_info,
"invalid chunk type %llu in sys_array at offset %u",
type, cur_offset);
ret = -EIO;
@@ -6756,9 +6761,9 @@ int btrfs_read_sys_array(struct btrfs_root *root)
if (ret)
break;
} else {
- printk(KERN_ERR
- "BTRFS: unexpected item type %u in sys_array at offset %u\n",
- (u32)key.type, cur_offset);
+ btrfs_err(fs_info,
+ "unexpected item type %u in sys_array at offset %u",
+ (u32)key.type, cur_offset);
ret = -EIO;
break;
}
@@ -6771,7 +6776,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
return ret;
out_short_read:
- printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
+ btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
len, cur_offset);
clear_extent_buffer_uptodate(sb);
free_extent_buffer_stale(sb);
@@ -7095,10 +7100,12 @@ int btrfs_get_dev_stats(struct btrfs_root *root,
mutex_unlock(&fs_devices->device_list_mutex);
if (!dev) {
- btrfs_warn(root->fs_info, "get dev_stats failed, device not found");
+ btrfs_warn(root->fs_info,
+ "get dev_stats failed, device not found");
return -ENODEV;
} else if (!dev->dev_stats_valid) {
- btrfs_warn(root->fs_info, "get dev_stats failed, not yet valid");
+ btrfs_warn(root->fs_info,
+ "get dev_stats failed, not yet valid");
return -ENODEV;
} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 6613e6335ca2..09ed29c67848 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -382,7 +382,7 @@ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num,
int need_raid_map);
-int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
+int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
u64 chunk_start, u64 physical, u64 devid,
u64 **logical, int *naddrs, int *stripe_len);
int btrfs_read_sys_array(struct btrfs_root *root);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 88d274e8ecf2..441b81a3e545 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -95,7 +95,7 @@ static int zlib_compress_pages(struct list_head *ws,
*total_in = 0;
if (Z_OK != zlib_deflateInit(&workspace->strm, 3)) {
- printk(KERN_WARNING "BTRFS: deflateInit failed\n");
+ pr_warn("BTRFS: deflateInit failed\n");
ret = -EIO;
goto out;
}
@@ -123,7 +123,7 @@ static int zlib_compress_pages(struct list_head *ws,
while (workspace->strm.total_in < len) {
ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
if (ret != Z_OK) {
- printk(KERN_DEBUG "BTRFS: deflate in loop returned %d\n",
+ pr_debug("BTRFS: deflate in loop returned %d\n",
ret);
zlib_deflateEnd(&workspace->strm);
ret = -EIO;
@@ -249,7 +249,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
}
if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
- printk(KERN_WARNING "BTRFS: inflateInit failed\n");
+ pr_warn("BTRFS: inflateInit failed\n");
return -EIO;
}
while (workspace->strm.total_in < srclen) {
@@ -339,7 +339,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
}
if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
- printk(KERN_WARNING "BTRFS: inflateInit failed\n");
+ pr_warn("BTRFS: inflateInit failed\n");
return -EIO;
}