author     Yury Norov <yury.norov@gmail.com>    2022-01-23 10:38:57 -0800
committer  Yury Norov <yury.norov@gmail.com>    2022-06-03 06:52:57 -0700
commit     dcf23cca930d1a60f7cd6b3a245a5081d77b8081 (patch)
tree       2d526eb0167d2efebc7dd1a5760c09436a6ca738 /arch
parent     b55032f1067a02c7f80943dd118060952e8bd7ac (diff)
arch/x86: replace nodes_weight with nodes_empty where appropriate
mm code calls nodes_weight() to check whether any bit of a given nodemask is set. We can do this more efficiently with nodes_empty(), because nodes_empty() stops traversing the nodemask as soon as it finds the first set bit, while nodes_weight() counts all bits unconditionally.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
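For illustration, here is a minimal userspace sketch of why an emptiness check can stop early while a weight (popcount) check cannot. It assumes a hypothetical fixed-size bitmap of MASK_LONGS words; the kernel's real nodes_weight()/nodes_empty() helpers are built on the bitmap_* routines and differ in detail.

#include <stdbool.h>
#include <stddef.h>

#define MASK_LONGS 4	/* hypothetical bitmap size: 4 * BITS_PER_LONG bits */

/* Weight: counts every set bit, so it always walks the whole bitmap. */
static unsigned int mask_weight(const unsigned long *mask)
{
	unsigned int w = 0;
	size_t i;

	for (i = 0; i < MASK_LONGS; i++)
		w += __builtin_popcountl(mask[i]);
	return w;
}

/* Emptiness: returns as soon as the first non-zero word is found. */
static bool mask_empty(const unsigned long *mask)
{
	size_t i;

	for (i = 0; i < MASK_LONGS; i++)
		if (mask[i])
			return false;
	return true;
}

The rewrite below is mechanical and preserves the truth value of each condition: !nodes_weight(m) becomes nodes_empty(m), and nodes_weight(m) becomes !nodes_empty(m).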
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/mm/amdtopology.c	2
-rw-r--r--	arch/x86/mm/numa_emulation.c	4
2 files changed, 3 insertions, 3 deletions
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 058b2f36b3a6..b3ca7d23e4b0 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -154,7 +154,7 @@ int __init amd_numa_init(void)
 		node_set(nodeid, numa_nodes_parsed);
 	}
 
-	if (!nodes_weight(numa_nodes_parsed))
+	if (nodes_empty(numa_nodes_parsed))
 		return -ENOENT;
 
 	/*
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 1a02b791d273..9a9305367fdd 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -123,7 +123,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 	 * Continue to fill physical nodes with fake nodes until there is no
 	 * memory left on any of them.
 	 */
-	while (nodes_weight(physnode_mask)) {
+	while (!nodes_empty(physnode_mask)) {
 		for_each_node_mask(i, physnode_mask) {
 			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
 			u64 start, limit, end;
@@ -270,7 +270,7 @@ static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
 	 * Fill physical nodes with fake nodes of size until there is no memory
 	 * left on any of them.
 	 */
-	while (nodes_weight(physnode_mask)) {
+	while (!nodes_empty(physnode_mask)) {
 		for_each_node_mask(i, physnode_mask) {
 			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
 			u64 start, limit, end;