author    | Srikar Dronamraju <srikar@linux.vnet.ibm.com> | 2020-09-21 15:26:44 +0530
committer | Michael Ellerman <mpe@ellerman.id.au>         | 2020-10-06 23:22:26 +1100
commit    | 4ca234a9cbd7c3a656b34dd98c8b156f70ed7849 (patch)
tree      | 84674ae66b7c470490aac8012a8b098f95b2db95 /arch/powerpc/kernel/smp.c
parent    | 4bce545903fa0290e011cf118997717f0c4f4d20 (diff)
powerpc/smp: Stop updating cpu_core_mask
Anton Blanchard reported that his 4096-vCPU KVM guest took around 30
minutes to boot. He traced the time to the loops that iterate over every
CPU while setting up cpu_core_mask.

Further analysis shows that cpu_core_mask and cpu_cpu_mask end up
identical for any CPU on Power. However, updating cpu_core_mask is
expensive because it is a per-CPU cpumask variable that has to be rebuilt
for every CPU, whereas cpu_cpu_mask is a per-node/per-die cpumask shared
by all the CPUs of that node.

cpu_cpu_mask is also the mask the scheduler needs. cpu_core_map, however,
is an exported symbol and cannot simply be dropped. Hence stop rebuilding
cpu_core_map and instead fill it with a snapshot of cpu_cpu_mask.
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Tested-by: Satheesh Rajendran <sathnaga@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200921095653.9701-3-srikar@linux.vnet.ibm.com
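For context, the hunks below reduce to the following contrast. This is a condensed sketch of the patch's before/after, not standalone-compilable kernel code; the wrapper function names are invented for illustration, while set_cpus_related(), cpu_core_mask(), cpu_core_map, cpu_cpu_mask() and get_physical_package_id() are the existing powerpc symbols touched by the diff:

```c
/*
 * Condensed sketch of this patch. The two wrapper names below are
 * hypothetical; the bodies mirror the hunks in the diff.
 */

/* Before: add_cpu_to_masks() scanned every online CPU for each CPU
 * being brought up, so onlining N CPUs cost O(N^2) package-id lookups. */
static void old_build_core_mask(int cpu)
{
	int pkg_id = get_physical_package_id(cpu);
	int i;

	for_each_cpu(i, cpu_online_mask)
		if (get_physical_package_id(i) == pkg_id)
			set_cpus_related(cpu, i, cpu_core_mask);
}

/* After: smp_prepare_cpus() snapshots the shared per-node mask instead,
 * one cpumask_copy() per CPU with no scan over all online CPUs. */
static void new_build_core_mask(int cpu)
{
	cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
}
```

The removed loop ran once for every CPU being onlined and scanned all online CPUs each time, so a 4096-vCPU guest performs on the order of 4096 x 4096 (roughly 16.7 million) get_physical_package_id() lookups during boot; the replacement is a single cpumask_copy() per CPU.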
Diffstat (limited to 'arch/powerpc/kernel/smp.c')
-rw-r--r-- | arch/powerpc/kernel/smp.c | 33
1 file changed, 7 insertions(+), 26 deletions(-)
```diff
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 58990baa5182..bf6d4192adda 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -953,12 +953,17 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 					local_memory_node(numa_cpu_lookup_table[cpu]));
 		}
 #endif
+		/*
+		 * cpu_core_map is now more updated and exists only since
+		 * its been exported for long. It only will have a snapshot
+		 * of cpu_cpu_mask.
+		 */
+		cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
 	}
 
 	/* Init the cpumasks so the boot CPU is related to itself */
 	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
 	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
-	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
 
 	if (has_coregroup_support())
 		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
@@ -1260,9 +1265,7 @@ static void remove_cpu_from_masks(int cpu)
 {
 	int i;
 
-	/* NB: cpu_core_mask is a superset of the others */
-	for_each_cpu(i, cpu_core_mask(cpu)) {
-		set_cpus_unrelated(cpu, i, cpu_core_mask);
+	for_each_cpu(i, cpu_cpu_mask(cpu)) {
 		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
 		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
 		if (has_big_cores)
@@ -1312,7 +1315,6 @@ EXPORT_SYMBOL_GPL(get_physical_package_id);
 static void add_cpu_to_masks(int cpu)
 {
 	int first_thread = cpu_first_thread_sibling(cpu);
-	int pkg_id = get_physical_package_id(cpu);
 	int i;
 
 	/*
@@ -1320,7 +1322,6 @@ static void add_cpu_to_masks(int cpu)
 	 * add it to it's own thread sibling mask.
 	 */
 	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
-	cpumask_set_cpu(cpu, cpu_core_mask(cpu));
 
 	for (i = first_thread; i < first_thread + threads_per_core; i++)
 		if (cpu_online(i))
@@ -1342,26 +1343,6 @@ static void add_cpu_to_masks(int cpu)
 			set_cpus_related(cpu, i, cpu_coregroup_mask);
 		}
 	}
-
-	if (pkg_id == -1) {
-		struct cpumask *(*mask)(int) = cpu_sibling_mask;
-
-		/*
-		 * Copy the sibling mask into core sibling mask and
-		 * mark any CPUs on the same chip as this CPU.
-		 */
-		if (shared_caches)
-			mask = cpu_l2_cache_mask;
-
-		for_each_cpu(i, mask(cpu))
-			set_cpus_related(cpu, i, cpu_core_mask);
-
-		return;
-	}
-
-	for_each_cpu(i, cpu_online_mask)
-		if (get_physical_package_id(i) == pkg_id)
-			set_cpus_related(cpu, i, cpu_core_mask);
 }
 
 /* Activate a secondary processor. */
```
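One way to read the net effect (a hypothetical snippet, not part of the patch; the helper name check_core_mask() is made up for illustration): since cpu_core_map is now only ever filled from cpu_cpu_mask(), the two accessors should report the same siblings for any online CPU on Power.

```c
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/smp.h>		/* cpu_core_mask() on powerpc */
#include <linux/topology.h>	/* cpu_cpu_mask() */

/* Hypothetical sanity check, not part of this patch: after the change,
 * cpu_core_mask(cpu) is just a snapshot of cpu_cpu_mask(cpu). */
static void check_core_mask(int cpu)
{
	WARN_ON(!cpumask_equal(cpu_core_mask(cpu), cpu_cpu_mask(cpu)));
}
```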