author    Qu Wenruo <wqu@suse.com>    2024-01-29 20:16:06 +1030
committer David Sterba <dsterba@suse.com>    2024-05-07 21:31:02 +0200
commit    6de3595473b0bae11102ef6db40e6f2334f13ed2 (patch)
tree      eda606a551da51c5cb017e94ecb6683cc25a332a /fs/btrfs/compression.c
parent    5d6f0e9890ed857a0bafb7fa73c85bf49bbe1e14 (diff)
btrfs: compression: add error handling for missed page cache
For all the supported compression algorithms, the compression path always needs to grab the page cache pages before doing the compression. Normally we get the page references without any problem, since the write path should have already locked the pages in the write range. For the sake of error handling, however, we should handle the page cache miss case.

Add a common wrapper, btrfs_compress_find_get_page(), which calls find_get_page() and does the error handling along with an error message. Callers inside the compression path only need to call btrfs_compress_find_get_page() and error out if it returns an error.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
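A minimal caller sketch (not part of the patch), showing how a compression implementation might consume the new wrapper; the helper name read_one_input_page() and the copy-out step are illustrative assumptions, not code from this commit:

	/*
	 * Illustrative only: read one input page through the new wrapper,
	 * erroring out on a page cache miss instead of assuming success.
	 */
	static int read_one_input_page(struct address_space *mapping, u64 start,
				       char *dst, unsigned int len)
	{
		struct page *in_page;
		void *kaddr;
		int ret;

		/* Error out on a page cache miss; the wrapper already logged it. */
		ret = btrfs_compress_find_get_page(mapping, start, &in_page);
		if (ret < 0)
			return ret;

		/* Copy out the page contents; the write path keeps the page locked. */
		kaddr = kmap_local_page(in_page);
		memcpy(dst, kaddr + offset_in_page(start), len);
		kunmap_local(kaddr);

		/* Drop the reference taken by find_get_page() inside the wrapper. */
		put_page(in_page);
		return 0;
	}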
Diffstat (limited to 'fs/btrfs/compression.c')
-rw-r--r--	fs/btrfs/compression.c	23
1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index b2b94009959d..3c9e22b5481f 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -974,6 +974,29 @@ static unsigned int btrfs_compress_set_level(int type, unsigned level)
 	return level;
 }
 
+/* Wrapper around find_get_page(), with extra error message. */
+int btrfs_compress_find_get_page(struct address_space *mapping, u64 start,
+				 struct page **in_page_ret)
+{
+	struct page *in_page;
+
+	/*
+	 * The compressed write path should have the page locked already, thus
+	 * we only need to grab one reference of the page cache.
+	 */
+	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
+	if (unlikely(!in_page)) {
+		struct btrfs_inode *inode = BTRFS_I(mapping->host);
+
+		btrfs_crit(inode->root->fs_info,
+	"failed to get page cache, root %lld ino %llu file offset %llu",
+			   inode->root->root_key.objectid, btrfs_ino(inode), start);
+		return -ENOENT;
+	}
+	*in_page_ret = in_page;
+	return 0;
+}
+
 /*
  * Given an address space and start and length, compress the bytes into @pages
  * that are allocated on demand.