author      Chao Yu <yuchao0@huawei.com>          2017-01-24 20:39:51 +0800
committer   Jaegeuk Kim <jaegeuk@kernel.org>      2017-02-22 20:24:51 -0800
commit      ba38c27eb93e2d36bf940ca65c145f6e2aaa6d5c (patch)
tree        59e1ea714f494a85ab6875d3204058765c9afe0d /fs/f2fs/xattr.h
parent      04b9a5f0f51942f9fd20e97df4ce1508f6335c59 (diff)
f2fs: enhance lookup xattr
Previously, getxattr loads all entries from both the inline xattr and the
xattr node block, and then looks up the target entry among all of them.
This lookup flow is inefficient: if the lookup can first hit in the inline
xattr cached with the inode page, there is no need to load and search the
xattr node block at all, which saves CPU time and IO latency.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
[Jaegeuk Kim: initialize NULL to avoid warning]
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
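The lookup order the message describes can be pictured with a small sketch.
The following is a minimal user-space illustration only: probe the inline
xattr area first and fall back to the xattr node block on a miss. The
structures and function names are invented for illustration and are not the
kernel's f2fs code; the functional change itself lives in f2fs's xattr
lookup path, which this header-only diffstat does not show.

/*
 * Minimal user-space sketch of the lookup order described above.
 * All names and structures are simplified stand-ins, not the
 * kernel's f2fs implementation.
 */
#include <stdio.h>
#include <string.h>

struct xentry {
	int		index;		/* xattr namespace index */
	const char	*name;
};

/* Linear scan of one entry table; returns NULL when not found. */
static const struct xentry *scan(const struct xentry *tbl, size_t nr,
				 int index, const char *name)
{
	for (size_t i = 0; i < nr; i++)
		if (tbl[i].index == index && !strcmp(tbl[i].name, name))
			return &tbl[i];
	return NULL;
}

/*
 * Search the inline xattr area (already cached with the inode page)
 * first; touch the separate xattr node block, which may cost extra
 * I/O, only when the inline area misses.
 */
static const struct xentry *lookup(const struct xentry *inline_tbl, size_t inline_nr,
				   const struct xentry *node_tbl, size_t node_nr,
				   int index, const char *name)
{
	const struct xentry *e = scan(inline_tbl, inline_nr, index, name);

	return e ? e : scan(node_tbl, node_nr, index, name);
}

int main(void)
{
	const struct xentry inline_tbl[] = { { 1, "selinux" } };
	const struct xentry node_tbl[]   = { { 7, "backup.tag" } };

	/* Hits in the inline table: the node-block table is never scanned. */
	const struct xentry *e = lookup(inline_tbl, 1, node_tbl, 1, 1, "selinux");

	printf("found: %s\n", e ? e->name : "(none)");
	return 0;
}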
Diffstat (limited to 'fs/f2fs/xattr.h')
-rw-r--r--   fs/f2fs/xattr.h   7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index f990de20cdcd..d5a94928c116 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -72,9 +72,10 @@ struct f2fs_xattr_entry {
 	for (entry = XATTR_FIRST_ENTRY(addr);\
 			!IS_XATTR_LAST_ENTRY(entry);\
 			entry = XATTR_NEXT_ENTRY(entry))
-
-#define MIN_OFFSET(i)	XATTR_ALIGN(inline_xattr_size(i) + PAGE_SIZE -	\
-				sizeof(struct node_footer) - sizeof(__u32))
+#define MAX_XATTR_BLOCK_SIZE	(PAGE_SIZE - sizeof(struct node_footer))
+#define VALID_XATTR_BLOCK_SIZE	(MAX_XATTR_BLOCK_SIZE - sizeof(__u32))
+#define MIN_OFFSET(i)		XATTR_ALIGN(inline_xattr_size(i) +	\
+				VALID_XATTR_BLOCK_SIZE)
 
 #define MAX_VALUE_LEN(i)	(MIN_OFFSET(i) -			\
 				sizeof(struct f2fs_xattr_header) -	\
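The header change itself is a refactor of the MIN_OFFSET() arithmetic into two
named helper macros; the resulting value is unchanged, since
VALID_XATTR_BLOCK_SIZE expands to the same PAGE_SIZE - sizeof(struct
node_footer) - sizeof(__u32) term that was previously written out inline. A
small user-space check illustrates this; PAGE_SIZE, the node_footer size and
XATTR_ALIGN() below are assumed stand-in values, not the kernel's definitions.

/*
 * Sketch only: the sizes and the alignment macro are stand-ins chosen
 * for illustration, not the kernel's actual definitions.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define NODE_FOOTER_SIZE	24UL	/* stand-in for sizeof(struct node_footer) */
#define U32_SIZE		4UL	/* sizeof(__u32) */
#define XATTR_ALIGN(len)	(((len) + 3UL) & ~3UL)

#define MAX_XATTR_BLOCK_SIZE	(PAGE_SIZE - NODE_FOOTER_SIZE)
#define VALID_XATTR_BLOCK_SIZE	(MAX_XATTR_BLOCK_SIZE - U32_SIZE)

int main(void)
{
	unsigned long inline_size = 200;	/* arbitrary inline_xattr_size(i) */

	/* Old expression, spelled out inline. */
	unsigned long old_off = XATTR_ALIGN(inline_size + PAGE_SIZE -
					    NODE_FOOTER_SIZE - U32_SIZE);
	/* New expression, via the named helper macros. */
	unsigned long new_off = XATTR_ALIGN(inline_size + VALID_XATTR_BLOCK_SIZE);

	assert(old_off == new_off);
	printf("MIN_OFFSET-style value: %lu\n", new_off);
	return 0;
}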