author:    Jaegeuk Kim <jaegeuk@kernel.org>    2016-06-02 15:24:24 -0700
committer: Jaegeuk Kim <jaegeuk@kernel.org>    2016-06-07 10:18:08 -0700
commit:    e589c2c477b44e06754508a4e8b883e5ae7294aa
tree:      ccdca1631cd37f7eef8286e0d12ddbe1e677cec8 /fs
parent:    29710bcf9426c84bb6a9b1d94316895ed6143813
f2fs: control not to exceed # of cached nat entries

This is to avoid cache entry management overhead, including the radix tree
operations.

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
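
[Annotation] For orientation, a stand-alone sketch of the idea in user-space C.
The types, the 48-byte entry size, and nat_cache_has_room() are hypothetical
stand-ins; only the shape of the check, a RAM-ratio test overridden by a hard
count cap as in the available_free_memory() hunk below, comes from the patch.

    #include <stdbool.h>
    #include <stdio.h>

    #define DEF_NAT_CACHE_THRESHOLD 100000  /* hard cap on cached NAT entries */

    struct nat_manager {
            unsigned long nat_cnt;          /* # of currently cached entries */
            unsigned long ram_thresh;       /* allowed % of available RAM */
    };

    /* hypothetical stand-in for excess_cached_nats() */
    static bool excess_cached_nats(const struct nat_manager *nm)
    {
            return nm->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
    }

    /*
     * Stand-in for the NAT_ENTRIES branch of available_free_memory():
     * on a big-RAM machine the ratio test alone would admit a huge
     * entry count, so the hard cap overrides it.
     */
    static bool nat_cache_has_room(const struct nat_manager *nm,
                                   unsigned long long avail_ram_pages)
    {
            unsigned long long mem_pages =
                    nm->nat_cnt * 48ULL / 4096; /* ~48 B/entry, 4 KiB pages */
            bool res = mem_pages < avail_ram_pages * nm->ram_thresh / 100 / 4;

            if (excess_cached_nats(nm))     /* cap overrides the ratio test */
                    res = false;
            return res;
    }

    int main(void)
    {
            /* 16 GiB of RAM: the ratio test alone would allow far more */
            struct nat_manager nm = { .nat_cnt = 120000, .ram_thresh = 10 };

            printf("has room: %s\n",
                   nat_cache_has_room(&nm, 4ULL << 20) ? "yes" : "no");
            return 0;
    }

Running this prints "has room: no": the footprint is tiny relative to RAM, but
the entry count is over the cap, which is exactly the case the patch targets.
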
Diffstat (limited to 'fs')
 fs/f2fs/node.c    | 4 ++++
 fs/f2fs/node.h    | 7 +++++++
 fs/f2fs/segment.c | 5 +++++
 3 files changed, 16 insertions(+), 0 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 16532b31dcd6..b448c8fec7fc 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -52,6 +52,10 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
 		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
 							PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
+		if (excess_cached_nats(sbi))
+			res = false;
+		if (nm_i->nat_cnt > DEF_NAT_CACHE_THRESHOLD)
+			res = false;
 	} else if (type == DIRTY_DENTS) {
 		if (sbi->sb->s_bdi->wb.dirty_exceeded)
 			return false;
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 2c2a797e18a8..673ce926cf09 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -27,6 +27,8 @@
 
 /* control dirty nats ratio threshold (default: 10% over max nid count) */
 #define DEF_DIRTY_NAT_RATIO_THRESHOLD	10
+/* control total # of nats */
+#define DEF_NAT_CACHE_THRESHOLD	100000
 
 /* vector size for gang look-up from nat cache that consists of radix tree */
 #define NATVEC_SIZE	64
@@ -126,6 +128,11 @@ static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
 					NM_I(sbi)->dirty_nats_ratio / 100;
 }
 
+static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
+{
+	return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
+}
+
 enum mem_type {
 	FREE_NIDS,	/* indicates the free nid list */
 	NAT_ENTRIES,	/* indicates the cached nat entry */
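
[Annotation] A back-of-envelope reading of the new threshold: struct nat_entry
embeds a node_info plus list linkage, so it is on the order of 40-50 bytes
depending on config and arch (an assumption, not a measured figure). The cap
therefore bounds the entry footprint at roughly 100,000 × ~48 B ≈ 4.8 MB; what
the commit message targets is less that raw footprint than the overhead of
managing that many nodes in the radix tree that indexes them.
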
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 34a9159cf5ac..9011bffd1dd0 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -345,6 +345,11 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 {
 	if (!need)
 		return;
+
+	/* balance_fs_bg is able to be pending */
+	if (excess_cached_nats(sbi))
+		f2fs_balance_fs_bg(sbi);
+
 	/*
 	 * We should do GC or end up with checkpoint, if there are so many dirty
 	 * dir/node pages without enough free segments.
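
[Annotation] Why hook f2fs_balance_fs() at all: the background balancer is
what actually trims the NAT cache (in kernels of this era, f2fs_balance_fs_bg()
frees entries via try_to_free_nats() once available_free_memory() reports
pressure), but it runs opportunistically and can stay pending while foreground
writers keep adding entries. Calling it from the hot foreground path once the
cap is exceeded keeps the cache bounded. A rough user-space model, with
simplified, hypothetical types and shrink sizes:

    #include <stdbool.h>
    #include <stdio.h>

    #define DEF_NAT_CACHE_THRESHOLD 100000

    struct sb_info {
            unsigned int nat_cnt;           /* # of cached NAT entries */
    };

    static bool excess_cached_nats(struct sb_info *sbi)
    {
            return sbi->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
    }

    /* stand-in for try_to_free_nats(): drop up to nr_shrink clean entries */
    static void try_to_free_nats(struct sb_info *sbi, unsigned int nr_shrink)
    {
            sbi->nat_cnt -= nr_shrink < sbi->nat_cnt ? nr_shrink : sbi->nat_cnt;
    }

    /* stand-in for f2fs_balance_fs_bg(): normally run from background paths */
    static void balance_fs_bg(struct sb_info *sbi)
    {
            if (excess_cached_nats(sbi))
                    try_to_free_nats(sbi, 10000);
    }

    /* stand-in for f2fs_balance_fs(): a hot foreground path */
    static void balance_fs(struct sb_info *sbi, bool need)
    {
            if (!need)
                    return;
            /*
             * The background work may not have run yet ("is able to be
             * pending"), so kick it directly once the cache is over the cap.
             */
            if (excess_cached_nats(sbi))
                    balance_fs_bg(sbi);
            /* ... GC / checkpoint decisions follow in the real function ... */
    }

    int main(void)
    {
            struct sb_info sbi = { .nat_cnt = 120000 };

            balance_fs(&sbi, true);
            printf("nat_cnt after balance: %u\n", sbi.nat_cnt);
            return 0;
    }

The model prints "nat_cnt after balance: 110000": one foreground call pulled
the count back toward the threshold without waiting for the background work.
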