author    Fabio M. De Francesco <fmdefrancesco@gmail.com>    2022-08-21 20:04:00 +0200
committer Andrew Morton <akpm@linux-foundation.org>          2022-09-11 21:55:09 -0700
commit    21490eff121555b123f7088b935e40ee43d2c642
tree      f32b1c93a1f846aaed07a82f633722ebc32fd997
parent    ca0ac8dfd35b218b3c95d3b38c695fbff35d94ca
hfs: replace kmap() with kmap_local_page() in btree.c
kmap() is being deprecated in favor of kmap_local_page().

There are two main problems with kmap(): (1) it comes with an overhead, as the mapping space is restricted and protected by a global lock for synchronization, and (2) it requires global TLB invalidation when its pool wraps, and it might block until a slot becomes available when the mapping space is fully utilized.

With kmap_local_page() the mappings are per thread, CPU local, can take page faults, and can be called from any context (including interrupts). It is faster than kmap() in kernels with HIGHMEM enabled. Furthermore, tasks can be preempted and, when they are scheduled to run again, the kernel virtual addresses are restored and still valid.

Since its use in btree.c is safe everywhere, it should be preferred.

Therefore, replace kmap() with kmap_local_page() in btree.c. Where possible, use the suited standard helpers (memzero_page(), memcpy_page()) instead of open coding kmap_local_page() plus memset() or memcpy().

Tested in a QEMU/KVM x86_32 VM with 6GB RAM, booting a kernel with HIGHMEM64GB enabled.

Link: https://lkml.kernel.org/r/20220821180400.8198-4-fmdefrancesco@gmail.com
Signed-off-by: Fabio M. De Francesco <fmdefrancesco@gmail.com>
Suggested-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Viacheslav Dubeyko <slava@dubeyko.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Chaitanya Kulkarni <kch@nvidia.com>
Cc: Christian Brauner (Microsoft) <brauner@kernel.org>
Cc: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Cc: Jeff Layton <jlayton@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Kees Cook <keescook@chromium.org>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
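For reference, a minimal sketch of the conversion pattern this patch applies (the example_* functions are hypothetical, not code from this patch): kunmap_local() is keyed by the address returned from kmap_local_page() rather than by the page, and nested local mappings must be released in LIFO order.

    #include <linux/highmem.h>

    /* Old idiom: kmap() draws from a global pool and is keyed by the page. */
    static void example_before(struct page *page)
    {
    	char *data = kmap(page);

    	data[0] = 0;
    	kunmap(page);
    }

    /* New idiom: a per-thread, CPU-local mapping, keyed by the address. */
    static void example_after(struct page *page)
    {
    	char *data = kmap_local_page(page);

    	data[0] = 0;
    	kunmap_local(data);
    }

Note that kunmap_local() accepts any address within the mapped page, which is why the hunks below can pass head (the mapped page address plus sizeof(struct hfs_bnode_desc)) instead of keeping the base pointer around.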
 fs/hfs/btree.c | 30 ++++++++++++++----------------
 1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 56c6782436e9..2fa4b1f8cc7f 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -80,7 +80,8 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
goto free_inode;
/* Load the header */
- head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+ head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
+ sizeof(struct hfs_bnode_desc));
tree->root = be32_to_cpu(head->root);
tree->leaf_count = be32_to_cpu(head->leaf_count);
tree->leaf_head = be32_to_cpu(head->leaf_head);
@@ -119,12 +120,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
tree->node_size_shift = ffs(size) - 1;
tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- kunmap(page);
+ kunmap_local(head);
put_page(page);
return tree;
fail_page:
- kunmap(page);
+ kunmap_local(head);
put_page(page);
free_inode:
tree->inode->i_mapping->a_ops = &hfs_aops;
@@ -170,7 +171,8 @@ void hfs_btree_write(struct hfs_btree *tree)
return;
/* Load the header */
page = node->page[0];
- head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+ head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
+ sizeof(struct hfs_bnode_desc));
head->root = cpu_to_be32(tree->root);
head->leaf_count = cpu_to_be32(tree->leaf_count);
@@ -181,7 +183,7 @@ void hfs_btree_write(struct hfs_btree *tree)
head->attributes = cpu_to_be32(tree->attributes);
head->depth = cpu_to_be16(tree->depth);
- kunmap(page);
+ kunmap_local(head);
set_page_dirty(page);
hfs_bnode_put(node);
}
@@ -269,7 +271,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
- data = kmap(*pagep);
+ data = kmap_local_page(*pagep);
off &= ~PAGE_MASK;
idx = 0;
@@ -282,7 +284,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
idx += i;
data[off] |= m;
set_page_dirty(*pagep);
- kunmap(*pagep);
+ kunmap_local(data);
tree->free_nodes--;
mark_inode_dirty(tree->inode);
hfs_bnode_put(node);
@@ -291,14 +293,14 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
}
}
if (++off >= PAGE_SIZE) {
- kunmap(*pagep);
- data = kmap(*++pagep);
+ kunmap_local(data);
+ data = kmap_local_page(*++pagep);
off = 0;
}
idx += 8;
len--;
}
- kunmap(*pagep);
+ kunmap_local(data);
nidx = node->next;
if (!nidx) {
printk(KERN_DEBUG "create new bmap node...\n");
@@ -314,7 +316,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
off = off16;
off += node->page_offset;
pagep = node->page + (off >> PAGE_SHIFT);
- data = kmap(*pagep);
+ data = kmap_local_page(*pagep);
off &= ~PAGE_MASK;
}
}
@@ -361,20 +363,20 @@ void hfs_bmap_free(struct hfs_bnode *node)
}
off += node->page_offset + nidx / 8;
page = node->page[off >> PAGE_SHIFT];
- data = kmap(page);
+ data = kmap_local_page(page);
off &= ~PAGE_MASK;
m = 1 << (~nidx & 7);
byte = data[off];
if (!(byte & m)) {
pr_crit("trying to free free bnode %u(%d)\n",
node->this, node->type);
- kunmap(page);
+ kunmap_local(data);
hfs_bnode_put(node);
return;
}
data[off] = byte & ~m;
set_page_dirty(page);
- kunmap(page);
+ kunmap_local(data);
hfs_bnode_put(node);
tree->free_nodes++;
mark_inode_dirty(tree->inode);
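The log also mentions memzero_page() and memcpy_page(), though no such call site occurs in btree.c itself. As a sketch of that idiom (hypothetical call site, assumed whole-page operations), each helper bundles the local map, the memory operation, and the matching unmap into one call:

    #include <linux/highmem.h>

    static void example_helpers(struct page *dst, struct page *src)
    {
    	/* Instead of kmap_local_page() + memset() + kunmap_local(): */
    	memzero_page(dst, 0, PAGE_SIZE);

    	/* Instead of mapping both pages around an open-coded memcpy(): */
    	memcpy_page(dst, 0, src, 0, PAGE_SIZE);
    }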