author		Chao Yu <chao@kernel.org>	2022-05-04 14:09:22 +0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-06-14 16:59:26 +0200
commit		6cc2086923b873a75223a86ed1bdf38133634d9f (patch)
tree		3094a1ddeca6e498d1ce92264a54189772b4d27f
parent		82a3b7ef78cbc0a7a9a03f8b27ee09d4dbdb6491 (diff)
f2fs: fix deadloop in foreground GC
commit cfd66bb715fd11fde3338d0660cffa1396adc27d upstream.

As Yanming reported in bugzilla:

https://bugzilla.kernel.org/show_bug.cgi?id=215914

The root cause is: in a very small image it is very easy to exceed the
threshold of foreground GC. If we calculate free space and dirty data at
section granularity, then in a corner case has_not_enough_free_secs()
will always return true, resulting in a dead loop in f2fs_gc().

So this patch refactors has_not_enough_free_secs() as below to fix this issue:
1. Calculate the needed space at block granularity and split all dirty blocks
   into two parts, a section part and a block part; compare the section part
   against free sections, and the block part against the free space left in
   the opened logs.
2. Account F2FS_DIRTY_NODES, F2FS_DIRTY_IMETA and F2FS_DIRTY_DENTS as
   node block consumers.
3. Account F2FS_DIRTY_DENTS as a data block consumer.

Cc: stable@vger.kernel.org
Reported-by: Ming Yan <yanming@tju.edu.cn>
Signed-off-by: Chao Yu <chao.yu@oppo.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
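To make the new accounting easier to follow, here is a minimal userspace sketch
of the refactored check. It is not the kernel code: BLKS_PER_SEC is an assumed
constant and curseg_has_room() is only a placeholder for
has_curseg_enough_space(); just the lower-bound/upper-bound split mirrors the
patch.

/*
 * Minimal userspace sketch of the refactored check (illustration only,
 * not kernel code).
 */
#include <stdbool.h>
#include <stdio.h>

#define BLKS_PER_SEC	512U	/* assumed section size in blocks */

/* Placeholder for has_curseg_enough_space(): pretend the currently opened
 * logs can absorb the leftover (sub-section) node/dentry blocks. */
static bool curseg_has_room(unsigned int node_blocks, unsigned int dent_blocks)
{
	return node_blocks + dent_blocks < BLKS_PER_SEC;
}

static bool not_enough_free_secs(unsigned int free_secs,
				 unsigned int reserved_secs,
				 unsigned int dirty_node_blocks, /* NODES + DENTS + IMETA */
				 unsigned int dirty_dent_blocks) /* DENTS */
{
	/* split dirty blocks into whole sections plus a remainder */
	unsigned int node_secs   = dirty_node_blocks / BLKS_PER_SEC;
	unsigned int node_blocks = dirty_node_blocks % BLKS_PER_SEC;
	unsigned int dent_secs   = dirty_dent_blocks / BLKS_PER_SEC;
	unsigned int dent_blocks = dirty_dent_blocks % BLKS_PER_SEC;

	unsigned int need_lower = node_secs + dent_secs + reserved_secs;
	unsigned int need_upper = need_lower + !!node_blocks + !!dent_blocks;

	if (free_secs > need_upper)	/* clearly enough space */
		return false;
	if (free_secs <= need_lower)	/* clearly short on space */
		return true;
	/* borderline: it depends on the room left in the opened logs */
	return !curseg_has_room(node_blocks, dent_blocks);
}

int main(void)
{
	/* a tiny image: 3 free sections, 2 reserved, 120 + 20 dirty blocks */
	printf("%d\n", not_enough_free_secs(3, 2, 120, 20));	/* prints 0 */
	return 0;
}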
-rw-r--r--	fs/f2fs/segment.h | 32
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 1f5db4cbc499..d5f9c928946f 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -534,11 +534,10 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi)
 	return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi));
 }
 
-static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
+static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
+			unsigned int node_blocks, unsigned int dent_blocks)
 {
-	unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
-					get_pages(sbi, F2FS_DIRTY_DENTS);
-	unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
+
 	unsigned int segno, left_blocks;
 	int i;
 
@@ -564,19 +563,28 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
 static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
 					int freed, int needed)
 {
-	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
-	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
-	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
+	unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
+					get_pages(sbi, F2FS_DIRTY_DENTS) +
+					get_pages(sbi, F2FS_DIRTY_IMETA);
+	unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
+	unsigned int node_secs = total_node_blocks / BLKS_PER_SEC(sbi);
+	unsigned int dent_secs = total_dent_blocks / BLKS_PER_SEC(sbi);
+	unsigned int node_blocks = total_node_blocks % BLKS_PER_SEC(sbi);
+	unsigned int dent_blocks = total_dent_blocks % BLKS_PER_SEC(sbi);
+	unsigned int free, need_lower, need_upper;
 
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		return false;
 
-	if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
-			has_curseg_enough_space(sbi))
+	free = free_sections(sbi) + freed;
+	need_lower = node_secs + dent_secs + reserved_sections(sbi) + needed;
+	need_upper = need_lower + (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);
+
+	if (free > need_upper)
 		return false;
-	return (free_sections(sbi) + freed) <=
-		(node_secs + 2 * dent_secs + imeta_secs +
-		reserved_sections(sbi) + needed);
+	else if (free <= need_lower)
+		return true;
+	return !has_curseg_enough_space(sbi, node_blocks, dent_blocks);
 }
 
 static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
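For a sense of why the old formula could loop forever on a tiny image, the
standalone comparison below plugs one hypothetical set of numbers into both
checks. Every figure and the secs_roundup() helper are invented for
illustration and do not come from the patch; freed and needed are taken as 0.

/*
 * Hypothetical old-vs-new comparison for one tiny-image scenario
 * (illustration only, not kernel code).
 */
#include <stdbool.h>
#include <stdio.h>

#define BLKS_PER_SEC	512U	/* assumed section size in blocks */

/* rounds a block count up to whole sections, in the style of the old
 * per-type get_blocktype_secs() accounting */
static unsigned int secs_roundup(unsigned int blocks)
{
	return (blocks + BLKS_PER_SEC - 1) / BLKS_PER_SEC;
}

int main(void)
{
	/* hypothetical tiny image: 3 free sections, 2 reserved sections,
	 * and only a handful of dirty blocks of each type */
	unsigned int free_secs = 3, reserved = 2;
	unsigned int nodes = 8, dents = 4, imeta = 2;

	/*
	 * Old check: each dirty type rounds up to a whole section and dents
	 * count twice, so the demand is 1 + 2*1 + 1 + 2 = 6 sections even
	 * though only 14 blocks are dirty; 3 <= 6 stays true no matter how
	 * much foreground GC frees, hence the dead loop.
	 */
	bool old_short = free_secs <= secs_roundup(nodes) +
			 2 * secs_roundup(dents) + secs_roundup(imeta) + reserved;

	/*
	 * New check: 14 dirty blocks make up 0 whole sections plus a
	 * remainder, so need_lower = 2 and need_upper = 4, and the
	 * borderline case defers to the room left in the opened logs.
	 */
	unsigned int total_node = nodes + dents + imeta, total_dent = dents;
	unsigned int need_lower = total_node / BLKS_PER_SEC +
				  total_dent / BLKS_PER_SEC + reserved;
	unsigned int need_upper = need_lower + !!(total_node % BLKS_PER_SEC) +
				  !!(total_dent % BLKS_PER_SEC);
	bool new_short;
	if (free_secs > need_upper)
		new_short = false;
	else if (free_secs <= need_lower)
		new_short = true;
	else
		/* the kernel would call has_curseg_enough_space() here; with
		 * only 14 leftover blocks the opened logs have room */
		new_short = false;

	printf("old: %s, new: %s\n", old_short ? "not enough" : "enough",
				     new_short ? "not enough" : "enough");
	return 0;	/* prints: old: not enough, new: enough */
}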