summaryrefslogtreecommitdiffstats
path: root/fs/btrfs/extent-tree.c
diff options
context:
space:
mode:
authorNikolay Borisov <nborisov@suse.com>2019-03-27 14:24:16 +0200
committerDavid Sterba <dsterba@suse.com>2019-04-29 19:02:38 +0200
commit8811133d8a982d3cef5d25eef54a8dca9e8e6ded (patch)
tree85a126b664476600d44b3f352f8917635bc7e625 /fs/btrfs/extent-tree.c
parente74e3993bcf6a1d119a2bbe7af2cc278a147f930 (diff)
downloadlinux-8811133d8a982d3cef5d25eef54a8dca9e8e6ded.tar.gz
linux-8811133d8a982d3cef5d25eef54a8dca9e8e6ded.tar.bz2
linux-8811133d8a982d3cef5d25eef54a8dca9e8e6ded.zip
btrfs: Optimize unallocated chunks discard
Currently unallocated chunks are always trimmed. For example 2 consecutive trims on large storage would trim freespace twice irrespective of whether the space was actually allocated between those trims. Optimize this behavior by exploiting the newly introduced alloc_state tree of btrfs_device. A new CHUNK_TRIMMED bit is used to mark those unallocated chunks which have been trimmed and have not been allocated afterwards. On chunk allocation the respective underlying devices' physical space will have its CHUNK_TRIMMED flag cleared. This avoids submitting discards for space which hasn't been changed since the last time discard was issued. This applies to the single mount period of the filesystem as the information is not stored permanently. Signed-off-by: Nikolay Borisov <nborisov@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--fs/btrfs/extent-tree.c57
1 files changed, 56 insertions, 1 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7500728bcdd3..1ebac1982a9c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -11249,6 +11249,54 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
return unpin_extent_range(fs_info, start, end, false);
}
+/*
+ * Decide whether a discard of the device range [*start, *start + *len)
+ * can be skipped because it already carries the CHUNK_TRIMMED bit in the
+ * device's alloc_state tree, i.e. it was trimmed earlier during this
+ * mount and has not been allocated since.
+ *
+ * Returns true only when the whole range falls inside an already-trimmed
+ * extent. Otherwise returns false, possibly shrinking the range (via
+ * *start and *len) to the untrimmed portion that still needs a discard.
+ *
+ * NOTE(review): only the first CHUNK_TRIMMED extent at or after *start is
+ * examined, so a range overlapping several trimmed extents is clipped
+ * against the first one only — presumably later trims re-cover the rest;
+ * confirm against the caller's retry behavior.
+ */
+static bool should_skip_trim(struct btrfs_device *device, u64 *start, u64 *len)
+{
+ u64 trimmed_start = 0, trimmed_end = 0;
+ u64 end = *start + *len - 1;
+
+ /* Non-zero return means no CHUNK_TRIMMED extent exists at or past *start. */
+ if (!find_first_extent_bit(&device->alloc_state, *start, &trimmed_start,
+ &trimmed_end, CHUNK_TRIMMED, NULL)) {
+ u64 trimmed_len = trimmed_end - trimmed_start + 1;
+
+ if (*start < trimmed_start) {
+ if (in_range(end, trimmed_start, trimmed_len) ||
+ end > trimmed_end) {
+ /*
+ * start|------|end
+ * ts|--|trimmed_len
+ * OR
+ * start|-----|end
+ * ts|-----|trimmed_len
+ */
+ /*
+ * Trim only the untrimmed prefix. NOTE(review): when
+ * end > trimmed_end, the tail past the trimmed extent
+ * is dropped from this pass — verify it is retried.
+ */
+ *len = trimmed_start - *start;
+ return false;
+ } else if (end < trimmed_start) {
+ /*
+ * start|------|end
+ * ts|--|trimmed_len
+ */
+ /* Trimmed extent is entirely past our range: trim it all. */
+ return false;
+ }
+ } else if (in_range(*start, trimmed_start, trimmed_len)) {
+ if (in_range(end, trimmed_start, trimmed_len)) {
+ /*
+ * start|------|end
+ * ts|----------|trimmed_len
+ */
+ /* Fully covered by an earlier trim: nothing to do. */
+ return true;
+ } else {
+ /*
+ * start|-----------|end
+ * ts|----------|trimmed_len
+ */
+ /* Skip the covered prefix; trim only the suffix. */
+ *start = trimmed_end + 1;
+ *len = end - *start + 1;
+ return false;
+ }
+ }
+ }
+ return false;
+}
/*
* It used to be that old block groups would be left around forever.
* Iterating over them would be enough to trim unused space. Since we
@@ -11319,7 +11367,14 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
start = max(range->start, start);
len = min(range->len, len);
- ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
+ if (!should_skip_trim(device, &start, &len)) {
+ ret = btrfs_issue_discard(device->bdev, start, len,
+ &bytes);
+ if (!ret)
+ set_extent_bits(&device->alloc_state, start,
+ start + bytes - 1,
+ CHUNK_TRIMMED);
+ }
mutex_unlock(&fs_info->chunk_mutex);
if (ret)