author     Jaegeuk Kim <jaegeuk@kernel.org>  2015-12-21 19:25:50 -0800
committer  Jaegeuk Kim <jaegeuk@kernel.org>  2015-12-30 10:13:00 -0800
commit     74fd8d9927ef08db30a85f131a124152aeba66c7 (patch)
tree       7d9acc9ac7ea5196baa96de29fc18a0903299aec /fs/f2fs/extent_cache.c
parent     7441ccef339f87abc27afc4ccfc24c014d7360c9 (diff)
f2fs: speed up shrinking extent tree entries
If there are no candidates for shrinking slab entries, we don't need to traverse any trees at all.

Reviewed-by: Chao Yu <chao2.yu@samsung.com>
[Jaegeuk Kim: fix missing initialization reported by Yunlei He]
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Diffstat (limited to 'fs/f2fs/extent_cache.c')
-rw-r--r--  fs/f2fs/extent_cache.c   14
1 file changed, 14 insertions, 0 deletions
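
What the patch does, in short: f2fs_destroy_extent_tree() can leave a cached extent tree behind after its in-memory inode is torn down (a "zombie" tree), and before this change the shrinker had to take extent_tree_lock and walk the radix tree just to find out whether any such trees existed. The new sbi->total_zombie_tree counter is bumped when a zombie is created, dropped when the tree is re-grabbed or reclaimed, and read up front so the shrinker can skip that walk entirely when it is zero. Because the counter is atomic, the check is done without taking extent_tree_lock, so the value is advisory rather than exact. The standalone sketch below models only this fast path; it is not kernel code, and apart from total_zombie_tree (which mirrors the new field) every name in it is illustrative.

/*
 * Userspace model of the zombie-tree fast path added by this patch.
 * Not kernel code: only the counter-check idea mirrors the diff.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int total_zombie_tree;	/* mirrors sbi->total_zombie_tree */

/* Inode goes away but its extent tree stays cached: it becomes a zombie. */
static void tree_becomes_zombie(void)
{
	atomic_fetch_add(&total_zombie_tree, 1);
}

/* A cached tree is re-grabbed by a live inode or reclaimed. */
static void tree_leaves_zombie(void)
{
	atomic_fetch_sub(&total_zombie_tree, 1);
}

/* Phase 1 of the shrinker in isolation: reclaim whole zombie trees. */
static unsigned int shrink_zombie_trees(unsigned int nr_shrink)
{
	unsigned int freed = 0;

	if (atomic_load(&total_zombie_tree) == 0)
		return 0;	/* fast path: no candidates, skip the tree walk */

	/* slow path: the model just drains the counter instead of walking
	 * a radix tree and freeing real extent trees */
	while (freed < nr_shrink && atomic_load(&total_zombie_tree) > 0) {
		tree_leaves_zombie();
		freed++;
	}
	return freed;
}

int main(void)
{
	printf("freed=%u\n", shrink_zombie_trees(16));	/* 0: counter is zero */
	tree_becomes_zombie();
	tree_becomes_zombie();
	printf("freed=%u\n", shrink_zombie_trees(16));	/* 2 */
	return 0;
}
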
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 0e97d6af9885..5305a29f91a3 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -71,6 +71,8 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
 		atomic_set(&et->refcount, 0);
 		et->count = 0;
 		atomic_inc(&sbi->total_ext_tree);
+	} else {
+		atomic_dec(&sbi->total_zombie_tree);
 	}
 	atomic_inc(&et->refcount);
 	up_write(&sbi->extent_tree_lock);
@@ -547,10 +549,14 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 	unsigned int found;
 	unsigned int node_cnt = 0, tree_cnt = 0;
 	int remained;
+	bool do_free = false;
 
 	if (!test_opt(sbi, EXTENT_CACHE))
 		return 0;
 
+	if (!atomic_read(&sbi->total_zombie_tree))
+		goto free_node;
+
 	if (!down_write_trylock(&sbi->extent_tree_lock))
 		goto out;
 
@@ -571,6 +577,7 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 				radix_tree_delete(root, et->ino);
 				kmem_cache_free(extent_tree_slab, et);
 				atomic_dec(&sbi->total_ext_tree);
+				atomic_dec(&sbi->total_zombie_tree);
 				tree_cnt++;
 
 				if (node_cnt + tree_cnt >= nr_shrink)
@@ -580,6 +587,7 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 	}
 	up_write(&sbi->extent_tree_lock);
 
+free_node:
 	/* 2. remove LRU extent entries */
 	if (!down_write_trylock(&sbi->extent_tree_lock))
 		goto out;
@@ -591,9 +599,13 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 		if (!remained--)
 			break;
 		list_del_init(&en->list);
+		do_free = true;
 	}
 	spin_unlock(&sbi->extent_lock);
 
+	if (do_free == false)
+		goto unlock_out;
+
 	/*
 	 * reset ino for searching victims from beginning of global extent tree.
 	 */
@@ -651,6 +663,7 @@ void f2fs_destroy_extent_tree(struct inode *inode)
 
 	if (inode->i_nlink && !is_bad_inode(inode) && et->count) {
 		atomic_dec(&et->refcount);
+		atomic_inc(&sbi->total_zombie_tree);
 		return;
 	}
 
@@ -716,6 +729,7 @@ void init_extent_cache_info(struct f2fs_sb_info *sbi)
 	INIT_LIST_HEAD(&sbi->extent_list);
 	spin_lock_init(&sbi->extent_lock);
 	atomic_set(&sbi->total_ext_tree, 0);
+	atomic_set(&sbi->total_zombie_tree, 0);
 	atomic_set(&sbi->total_ext_node, 0);
 }
 
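
The same pattern appears a second time in the diff: the do_free flag records whether the LRU pass actually detached any extent nodes, and if it did not, the walk over the per-inode trees that would free them is skipped. A minimal standalone sketch of that second fast path, again with illustrative names only (not the kernel code):

/*
 * Userspace model of the do_free fast path: skip the expensive free pass
 * when the LRU pass detached nothing.  Illustrative names; not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct lru {
	unsigned int count;	/* extent nodes currently on the LRU list */
};

static unsigned int shrink_extent_nodes(struct lru *lru, unsigned int nr_shrink)
{
	bool do_free = false;
	unsigned int detached = 0;

	/* pass 1: detach up to nr_shrink entries from the LRU
	 * (done under a spinlock in the real code) */
	while (lru->count && detached < nr_shrink) {
		lru->count--;
		detached++;
		do_free = true;
	}

	/* fast path: nothing was detached, so there is nothing to free */
	if (!do_free)
		return 0;

	/* slow path: the real code now walks the extent trees to free
	 * the detached nodes; the model just reports the count */
	return detached;
}

int main(void)
{
	struct lru empty = { .count = 0 };
	struct lru busy = { .count = 8 };

	printf("freed=%u\n", shrink_extent_nodes(&empty, 16));	/* 0 */
	printf("freed=%u\n", shrink_extent_nodes(&busy, 16));	/* 8 */
	return 0;
}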