author    Dennis Zhou <dennis@kernel.org>    2020-01-02 16:26:37 -0500
committer David Sterba <dsterba@suse.com>    2020-01-20 16:40:59 +0100
commit    4aa9ad520398bf9cef70fc9c363567da44312045 (patch)
tree      77badf26527c9d58896c8d14f3f906109aff668d /fs/btrfs
parent    e93591bb6ecf3e31c8f5366eac143f4f9c270915 (diff)
btrfs: limit max discard size for async discard
Throttle the maximum size of a discard so that we can provide an upper
bound for the rate of async discard. While the block layer is able to
split discards into the appropriate sized discards, we want to be able
to account more accurately the rate at which we are consuming NCQ slots
as well as limit the upper bound of work for a discard.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
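For illustration only (not part of the commit or of the kernel tree): a minimal
user-space C sketch of the idea described above, splitting one large free-space
extent into discards of at most a fixed maximum size so the work per discard
stays bounded. The helper issue_discard() and the constant are hypothetical
stand-ins for the kernel-side code.

    /*
     * Illustrative sketch: cap each discard at a fixed maximum size.
     * issue_discard() is a hypothetical stand-in for submitting one
     * discard request to the device.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define ASYNC_DISCARD_MAX_SIZE (64ULL * 1024 * 1024) /* mirrors SZ_64M */

    static void issue_discard(uint64_t start, uint64_t len)
    {
            printf("discard [%llu, +%llu)\n",
                   (unsigned long long)start, (unsigned long long)len);
    }

    /* Split one extent into discards of at most ASYNC_DISCARD_MAX_SIZE. */
    static void discard_extent_capped(uint64_t start, uint64_t bytes)
    {
            while (bytes) {
                    uint64_t len = bytes > ASYNC_DISCARD_MAX_SIZE ?
                                   ASYNC_DISCARD_MAX_SIZE : bytes;

                    issue_discard(start, len);
                    start += len;
                    bytes -= len;
            }
    }

    int main(void)
    {
            /* A 200 MiB extent becomes three 64 MiB discards and an 8 MiB tail. */
            discard_extent_capped(0, 200ULL * 1024 * 1024);
            return 0;
    }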
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/discard.h           |  5
-rw-r--r--  fs/btrfs/free-space-cache.c  | 41
2 files changed, 37 insertions(+), 9 deletions(-)
diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h
index 5250fe178e49..562c60fab77a 100644
--- a/fs/btrfs/discard.h
+++ b/fs/btrfs/discard.h
@@ -3,10 +3,15 @@
 #ifndef BTRFS_DISCARD_H
 #define BTRFS_DISCARD_H
 
+#include <linux/sizes.h>
+
 struct btrfs_fs_info;
 struct btrfs_discard_ctl;
 struct btrfs_block_group;
 
+/* Discard size limits */
+#define BTRFS_ASYNC_DISCARD_MAX_SIZE	(SZ_64M)
+
 /* Work operations */
 void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
 			       struct btrfs_block_group *block_group);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 40fb918a82f4..438043aab6fb 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -3466,16 +3466,36 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group,
 		extent_start = entry->offset;
 		extent_bytes = entry->bytes;
 		extent_trim_state = entry->trim_state;
-		start = max(start, extent_start);
-		bytes = min(extent_start + extent_bytes, end) - start;
-		if (bytes < minlen) {
-			spin_unlock(&ctl->tree_lock);
-			mutex_unlock(&ctl->cache_writeout_mutex);
-			goto next;
-		}
+		if (async) {
+			start = entry->offset;
+			bytes = entry->bytes;
+			if (bytes < minlen) {
+				spin_unlock(&ctl->tree_lock);
+				mutex_unlock(&ctl->cache_writeout_mutex);
+				goto next;
+			}
+			unlink_free_space(ctl, entry);
+			if (bytes > BTRFS_ASYNC_DISCARD_MAX_SIZE) {
+				bytes = BTRFS_ASYNC_DISCARD_MAX_SIZE;
+				extent_bytes = BTRFS_ASYNC_DISCARD_MAX_SIZE;
+				entry->offset += BTRFS_ASYNC_DISCARD_MAX_SIZE;
+				entry->bytes -= BTRFS_ASYNC_DISCARD_MAX_SIZE;
+				link_free_space(ctl, entry);
+			} else {
+				kmem_cache_free(btrfs_free_space_cachep, entry);
+			}
+		} else {
+			start = max(start, extent_start);
+			bytes = min(extent_start + extent_bytes, end) - start;
+			if (bytes < minlen) {
+				spin_unlock(&ctl->tree_lock);
+				mutex_unlock(&ctl->cache_writeout_mutex);
+				goto next;
+			}
 
-		unlink_free_space(ctl, entry);
-		kmem_cache_free(btrfs_free_space_cachep, entry);
+			unlink_free_space(ctl, entry);
+			kmem_cache_free(btrfs_free_space_cachep, entry);
+		}
 		spin_unlock(&ctl->tree_lock);
 
 		trim_entry.start = extent_start;
@@ -3639,6 +3659,9 @@ static int trim_bitmaps(struct btrfs_block_group *block_group,
 			goto next;
 		}
 
+		if (async && bytes > BTRFS_ASYNC_DISCARD_MAX_SIZE)
+			bytes = BTRFS_ASYNC_DISCARD_MAX_SIZE;
+
 		bitmap_clear_bits(ctl, entry, start, bytes);
 		if (entry->bytes == 0)
 			free_bitmap(ctl, entry);