author:    Filipe Manana <fdmanana@suse.com>  2022-01-20 17:11:52 +0000
committer: David Sterba <dsterba@suse.com>    2022-01-24 18:10:56 +0100
commit: 3c9d31c715948aaff0ee6d322a91a2dec07770bf (patch)
tree:   f912541aa7bc8ec0a76482f3901e21c5e6a6b05b /fs/btrfs/ioctl.c
parent: 0cb5950f3f3b51a4e8657d106f897f2b913e0586 (diff)
btrfs: add back missing dirty page rate limiting to defrag
A defrag operation can dirty a lot of pages, especially if operating on the entire file or a large file range. Any task dirtying pages should periodically call balance_dirty_pages_ratelimited(), as stated in that function's comments, otherwise it can leave too many dirty pages in the system. This is what we did before the refactoring in 5.16, and it should have remained, just like in the buffered write path and in relocation. So restore that behaviour.

Fixes: 7b508037d4cac3 ("btrfs: defrag: use defrag_one_cluster() to implement btrfs_defrag_file()")
CC: stable@vger.kernel.org # 5.16
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
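The pattern the commit message refers to is the usual kernel page-dirtying loop: dirty pages, then call balance_dirty_pages_ratelimited() on the mapping. The call is cheap while the task is under its dirty-page ratelimit and only does the expensive balancing work once enough pages have been dirtied, so it is safe to invoke once per iteration. A minimal sketch of that pattern, assuming a hypothetical dirty_one_page() helper standing in for whatever code actually dirties the page:

#include <linux/fs.h>
#include <linux/writeback.h>

/*
 * Sketch of a page-dirtying loop.  dirty_one_page() is a hypothetical
 * helper; the point is the balance_dirty_pages_ratelimited() call after
 * each dirtied page, which internally rate-limits itself so that the
 * expensive dirty-page balancing only runs periodically.
 */
static void dirty_range(struct inode *inode, pgoff_t first, pgoff_t last)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;

	for (index = first; index <= last; index++) {
		dirty_one_page(mapping, index);	/* hypothetical helper */
		balance_dirty_pages_ratelimited(mapping);
	}
}

In the patch below, the call is additionally gated on sectors_defragged having increased during the cluster, so clusters where nothing was defragged (and therefore nothing was dirtied) skip it entirely.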
Diffstat (limited to 'fs/btrfs/ioctl.c')
 fs/btrfs/ioctl.c | 5 +++++
 1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a883f1824a17..ac420060aac3 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1579,6 +1579,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 	}
 
 	while (cur < last_byte) {
+		const unsigned long prev_sectors_defragged = sectors_defragged;
 		u64 cluster_end;
 
 		/* The cluster size 256K should always be page aligned */
@@ -1610,6 +1611,10 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 					      cluster_end + 1 - cur, extent_thresh,
 					      newer_than, do_compress,
 					      &sectors_defragged, max_to_defrag);
+
+		if (sectors_defragged > prev_sectors_defragged)
+			balance_dirty_pages_ratelimited(inode->i_mapping);
+
 		btrfs_inode_unlock(inode, 0);
 		if (ret < 0)
 			break;