path: root/fs/erofs/zdata.c
author    Gao Xiang <hsiangkao@linux.alibaba.com>  2023-05-27 04:14:57 +0800
committer Gao Xiang <hsiangkao@linux.alibaba.com>  2023-05-29 23:06:03 +0800
commit    7b4e372c36fcd33c74ba3cbd65fa534b9c558184 (patch)
tree      0ad30bf6fdbaaa1fa9e316bf83d34d6101146ba1 /fs/erofs/zdata.c
parent    967c28b23f6c89bb8eef6a046ea88afe0d7c1029 (diff)
erofs: adapt managed inode operations into folios
This patch gets rid of erofs_try_to_free_cached_page() and folds it
into .release_folio(). It also moves managed inode operations into
zdata.c, which simplifies the code a bit. No logic changes.

Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Reviewed-by: Yue Hu <huyue2@coolpad.com>
Link: https://lore.kernel.org/r/20230526201459.128169-5-hsiangkao@linux.alibaba.com
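For background, ->release_folio() is the folio-based replacement for the old
per-page release hook: the VM calls it when it wants to strip private data
from a page-cache folio so the folio can be freed, and the handler must return
true only once the private state is actually gone. A minimal sketch of that
contract follows; the my_fs_* names are hypothetical and not part of this
patch:

	#include <linux/fs.h>
	#include <linux/pagemap.h>
	#include <linux/slab.h>

	struct my_fs_private {
		atomic_t inflight;	/* >0 while I/O still references the folio */
	};

	static bool my_fs_release_folio(struct folio *folio, gfp_t gfp)
	{
		struct my_fs_private *priv;

		if (!folio_test_private(folio))
			return true;		/* nothing attached; safe to free */

		priv = folio_get_private(folio);
		if (atomic_read(&priv->inflight))
			return false;		/* still in use; tell the VM to back off */

		folio_detach_private(folio);	/* clears PG_private and its reference */
		kfree(priv);
		return true;
	}

The erofs handler below follows the same shape, except that "busy" is decided
by whether the pcluster workgroup can be frozen.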
Diffstat (limited to 'fs/erofs/zdata.c')
-rw-r--r--  fs/erofs/zdata.c  59
1 file changed, 51 insertions(+), 8 deletions(-)
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index d29552ea53fc..c556906354e5 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -665,29 +665,72 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 	return 0;
 }
 
-int erofs_try_to_free_cached_page(struct page *page)
+static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 {
-	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
-	int ret, i;
+	struct z_erofs_pcluster *pcl = folio_get_private(folio);
+	bool ret;
+	int i;
+
+	if (!folio_test_private(folio))
+		return true;
 
 	if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1))
-		return 0;
+		return false;
 
-	ret = 0;
+	ret = false;
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
 	for (i = 0; i < pcl->pclusterpages; ++i) {
-		if (pcl->compressed_bvecs[i].page == page) {
+		if (pcl->compressed_bvecs[i].page == &folio->page) {
 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
-			ret = 1;
+			ret = true;
 			break;
 		}
 	}
 	erofs_workgroup_unfreeze(&pcl->obj, 1);
+
 	if (ret)
-		detach_page_private(page);
+		folio_detach_private(folio);
 	return ret;
 }
 
+/*
+ * It will be called only on inode eviction. In case that there are still some
+ * decompression requests in progress, wait with rescheduling for a bit here.
+ * An extra lock could be introduced instead but it seems unnecessary.
+ */
+static void z_erofs_cache_invalidate_folio(struct folio *folio,
+					   size_t offset, size_t length)
+{
+	const size_t stop = length + offset;
+
+	/* Check for potential overflow in debug mode */
+	DBG_BUGON(stop > folio_size(folio) || stop < length);
+
+	if (offset == 0 && stop == folio_size(folio))
+		while (!z_erofs_cache_release_folio(folio, GFP_NOFS))
+			cond_resched();
+}
+
+static const struct address_space_operations z_erofs_cache_aops = {
+	.release_folio = z_erofs_cache_release_folio,
+	.invalidate_folio = z_erofs_cache_invalidate_folio,
+};
+
+int erofs_init_managed_cache(struct super_block *sb)
+{
+	struct inode *const inode = new_inode(sb);
+
+	if (!inode)
+		return -ENOMEM;
+
+	set_nlink(inode, 1);
+	inode->i_size = OFFSET_MAX;
+	inode->i_mapping->a_ops = &z_erofs_cache_aops;
+	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+	EROFS_SB(sb)->managed_cache = inode;
+	return 0;
+}
+
 static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
 				   struct z_erofs_bvec *bvec)
 {
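With the managed inode now fully encapsulated in zdata.c, the rest of the
filesystem only sees the single erofs_init_managed_cache() entry point. For
reference, the mount-time call site in fs/erofs/super.c is outside this diff,
but looks roughly like:

	/* in erofs_fc_fill_super(), sketched from context -- not part of this diff */
	err = erofs_init_managed_cache(sb);
	if (err)
		return err;

A no-op stub presumably covers !CONFIG_EROFS_FS_ZIP builds, so the caller
needs no ifdef of its own.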