summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorQu Wenruo <wqu@suse.com>2024-01-29 20:16:06 +1030
committerDavid Sterba <dsterba@suse.com>2024-05-07 21:31:02 +0200
commit6de3595473b0bae11102ef6db40e6f2334f13ed2 (patch)
treeeda606a551da51c5cb017e94ecb6683cc25a332a
parent5d6f0e9890ed857a0bafb7fa73c85bf49bbe1e14 (diff)
downloadlinux-6de3595473b0bae11102ef6db40e6f2334f13ed2.tar.gz
linux-6de3595473b0bae11102ef6db40e6f2334f13ed2.tar.bz2
linux-6de3595473b0bae11102ef6db40e6f2334f13ed2.zip
btrfs: compression: add error handling for missed page cache
For all the supported compression algorithms, the compression path would always need to grab the page cache, then do the compression. Normally we would get a page reference without any problem, since the write path should have already locked the pages in the write range. For the sake of error handling, we should handle the page cache miss case. Add a common wrapper, btrfs_compress_find_get_page(), which calls find_get_page() and does the error handling along with an error message. Callers inside the compression path only need to call btrfs_compress_find_get_page(), and error out if it returns any error. Signed-off-by: Qu Wenruo <wqu@suse.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
-rw-r--r--fs/btrfs/compression.c23
-rw-r--r--fs/btrfs/compression.h3
-rw-r--r--fs/btrfs/lzo.c5
-rw-r--r--fs/btrfs/zlib.c14
-rw-r--r--fs/btrfs/zstd.c9
5 files changed, 46 insertions, 8 deletions
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index b2b94009959d..3c9e22b5481f 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -974,6 +974,29 @@ static unsigned int btrfs_compress_set_level(int type, unsigned level)
return level;
}
+/* Wrapper around find_get_page(), with extra error message. */
+int btrfs_compress_find_get_page(struct address_space *mapping, u64 start,
+ struct page **in_page_ret)
+{
+ struct page *in_page;
+
+ /*
+ * The compressed write path should have the page locked already, thus
+ * we only need to grab one reference of the page cache.
+ */
+ in_page = find_get_page(mapping, start >> PAGE_SHIFT);
+ if (unlikely(!in_page)) {
+ struct btrfs_inode *inode = BTRFS_I(mapping->host);
+
+ btrfs_crit(inode->root->fs_info,
+ "failed to get page cache, root %lld ino %llu file offset %llu",
+ inode->root->root_key.objectid, btrfs_ino(inode), start);
+ return -ENOENT;
+ }
+ *in_page_ret = in_page;
+ return 0;
+}
+
/*
* Given an address space and start and length, compress the bytes into @pages
* that are allocated on demand.
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 4691a84ca838..7590dc86d040 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -149,6 +149,9 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len);
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
+int btrfs_compress_find_get_page(struct address_space *mapping, u64 start,
+ struct page **in_page_ret);
+
int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
u64 start, struct page **pages, unsigned long *out_pages,
unsigned long *total_in, unsigned long *total_out);
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 3e5d3b7028e8..6ac2cd177d44 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -244,8 +244,9 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
/* Get the input page first */
if (!page_in) {
- page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
- ASSERT(page_in);
+ ret = btrfs_compress_find_get_page(mapping, cur_in, &page_in);
+ if (ret < 0)
+ goto out;
}
/* Compress at most one sector of data each time */
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index e5b3f2003896..ad6f011eab69 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -151,9 +151,12 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
if (data_in) {
kunmap_local(data_in);
put_page(in_page);
+ data_in = NULL;
}
- in_page = find_get_page(mapping,
- start >> PAGE_SHIFT);
+ ret = btrfs_compress_find_get_page(mapping,
+ start, &in_page);
+ if (ret < 0)
+ goto out;
data_in = kmap_local_page(in_page);
copy_page(workspace->buf + i * PAGE_SIZE,
data_in);
@@ -164,9 +167,12 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
if (data_in) {
kunmap_local(data_in);
put_page(in_page);
+ data_in = NULL;
}
- in_page = find_get_page(mapping,
- start >> PAGE_SHIFT);
+ ret = btrfs_compress_find_get_page(mapping,
+ start, &in_page);
+ if (ret < 0)
+ goto out;
data_in = kmap_local_page(in_page);
start += PAGE_SIZE;
workspace->strm.next_in = data_in;
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 92b3744b819b..b647ad036af3 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -406,7 +406,9 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
}
/* map in the first page of input data */
- in_page = find_get_page(mapping, start >> PAGE_SHIFT);
+ ret = btrfs_compress_find_get_page(mapping, start, &in_page);
+ if (ret < 0)
+ goto out;
workspace->in_buf.src = kmap_local_page(in_page);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
@@ -479,10 +481,13 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
if (workspace->in_buf.pos == workspace->in_buf.size) {
tot_in += PAGE_SIZE;
kunmap_local(workspace->in_buf.src);
+ workspace->in_buf.src = NULL;
put_page(in_page);
start += PAGE_SIZE;
len -= PAGE_SIZE;
- in_page = find_get_page(mapping, start >> PAGE_SHIFT);
+ ret = btrfs_compress_find_get_page(mapping, start, &in_page);
+ if (ret < 0)
+ goto out;
workspace->in_buf.src = kmap_local_page(in_page);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);