author    Chao Yu <chao@kernel.org>    2022-02-04 08:34:10 +0800
committer Jaegeuk Kim <jaegeuk@kernel.org>    2022-02-03 22:21:28 -0800
commit    430f163b01888dc26696365d9c1053ba9d6c7d92 (patch)
tree      076333f82610ab1c0ea54be69b4a12cb23009e58 /fs/f2fs/checkpoint.c
parent    6d18762ed5cd549fde74fd0e05d4d87bac5a3beb (diff)
f2fs: adjust readahead block number during recovery
In a fragmented image, entries in the dnode block list may be located at non-contiguous physical block addresses. However, the recovery flow always reads ahead BIO_MAX_VECS blocks, so in that case the current readahead policy is inefficient. Adjust the readahead window size dynamically based on the consecutiveness of dnode blocks.

Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Diffstat (limited to 'fs/f2fs/checkpoint.c')
 fs/f2fs/checkpoint.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index deeda95688f0..a13b6b4af220 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -282,18 +282,22 @@ out:
 	return blkno - start;
 }
 
-void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
+void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
+							unsigned int ra_blocks)
 {
 	struct page *page;
 	bool readahead = false;
 
+	if (ra_blocks == RECOVERY_MIN_RA_BLOCKS)
+		return;
+
 	page = find_get_page(META_MAPPING(sbi), index);
 	if (!page || !PageUptodate(page))
 		readahead = true;
 	f2fs_put_page(page, 0);
 
 	if (readahead)
-		f2fs_ra_meta_pages(sbi, index, BIO_MAX_VECS, META_POR, true);
+		f2fs_ra_meta_pages(sbi, index, ra_blocks, META_POR, true);
 }
 
 static int __f2fs_write_meta_page(struct page *page,
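
Note: the recovery-side caller that actually computes ra_blocks lives outside this diffstat (in fs/f2fs/recovery.c) and is not shown here. The following is only a minimal, self-contained sketch of the window-adjustment idea stated in the commit message: grow the readahead window while dnode blocks remain physically contiguous, shrink it when they do not. The adjust_ra_blocks() helper and the values chosen for RECOVERY_MAX_RA_BLOCKS (256) and RECOVERY_MIN_RA_BLOCKS (1) are illustrative assumptions, not code from this commit; only the RECOVERY_MIN_RA_BLOCKS early-return appears in the diff above.

/*
 * Hypothetical sketch of a dynamic readahead window, compiled as a
 * standalone userspace program for illustration only.
 */
#include <stdio.h>

#define RECOVERY_MAX_RA_BLOCKS	256	/* assumed cap, e.g. BIO_MAX_VECS */
#define RECOVERY_MIN_RA_BLOCKS	1	/* window of 1 means "skip readahead" */

static unsigned int adjust_ra_blocks(unsigned int ra_blocks,
				     unsigned long long prev_blkaddr,
				     unsigned long long blkaddr)
{
	if (blkaddr == prev_blkaddr + 1)
		/* contiguous dnode block: widen the window, up to the cap */
		return ra_blocks * 2 > RECOVERY_MAX_RA_BLOCKS ?
				RECOVERY_MAX_RA_BLOCKS : ra_blocks * 2;

	/* fragmented dnode block: shrink the window, down to the floor */
	return ra_blocks / 2 < RECOVERY_MIN_RA_BLOCKS ?
			RECOVERY_MIN_RA_BLOCKS : ra_blocks / 2;
}

int main(void)
{
	/* a fragmented, then contiguous, sequence of dnode block addresses */
	unsigned long long addrs[] = { 100, 205, 206, 207, 450, 451 };
	unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;
	int i;

	for (i = 1; i < 6; i++) {
		ra_blocks = adjust_ra_blocks(ra_blocks, addrs[i - 1], addrs[i]);
		printf("blkaddr %llu -> ra_blocks %u\n", addrs[i], ra_blocks);
	}
	return 0;
}

Under this assumed policy, the window stays near the cap on sequential images and quickly backs off to a single block on fragmented ones; the checkpoint.c change above then returns early from f2fs_ra_meta_pages_cond() when the window has collapsed to RECOVERY_MIN_RA_BLOCKS, avoiding useless readahead.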