Diffstat (limited to 'arch/arm64/kernel')
 arch/arm64/kernel/cpuidle.c            |  50
 arch/arm64/kernel/head.S               |  20
 arch/arm64/kernel/hibernate-asm.S      |   8
 arch/arm64/kernel/hibernate.c          |   2
 arch/arm64/kernel/kaslr.c              |  11
 arch/arm64/kernel/machine_kexec_file.c |  18
 arch/arm64/kernel/perf_event.c         |   2
 arch/arm64/kernel/process.c            |  76
 arch/arm64/kernel/psci.c               |  10
 arch/arm64/kernel/setup.c              |  20
 arch/arm64/kernel/smp.c                |   4
 arch/arm64/kernel/topology.c           | 312
 12 files changed, 204 insertions(+), 329 deletions(-)
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index d1048173fd8a..e4d6af2fdec7 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -11,6 +11,7 @@
#include <linux/cpu_pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/psci.h>
#include <asm/cpuidle.h>
#include <asm/cpu_ops.h>
@@ -46,17 +47,58 @@ int arm_cpuidle_suspend(int index)
#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags))
+static int psci_acpi_cpu_init_idle(unsigned int cpu)
+{
+ int i, count;
+ struct acpi_lpi_state *lpi;
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ /*
+ * If the PSCI cpu_suspend function hook has not been initialized,
+ * idle states must not be enabled, so bail out
+ */
+ if (!psci_ops.cpu_suspend)
+ return -EOPNOTSUPP;
+
+ if (unlikely(!pr || !pr->flags.has_lpi))
+ return -EINVAL;
+
+ count = pr->power.count - 1;
+ if (count <= 0)
+ return -ENODEV;
+
+ for (i = 0; i < count; i++) {
+ u32 state;
+
+ lpi = &pr->power.lpi_states[i + 1];
+ /*
+ * Only bits[31:0] represent a PSCI power_state while
+ * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
+ */
+ state = lpi->address;
+ if (!psci_power_state_is_valid(state)) {
+ pr_warn("Invalid PSCI power state %#x\n", state);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
int acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
- return arm_cpuidle_init(cpu);
+ return psci_acpi_cpu_init_idle(cpu);
}
int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
+ u32 state = lpi->address;
+
if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
- return CPU_PM_CPU_IDLE_ENTER_RETENTION(arm_cpuidle_suspend,
- lpi->index);
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(psci_cpu_suspend_enter,
+ lpi->index, state);
else
- return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, lpi->index);
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
+ lpi->index, state);
}
#endif
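
A note on the validation added above: psci_power_state_is_valid() rejects power_state values with reserved bits set. Below is a minimal sketch of the original-format encoding it checks, with the field layout taken from the PSCI specification; the mask names are local stand-ins for the include/uapi/linux/psci.h definitions, and PSCI 1.0's extended StateID format widens the ID field instead.

#include <stdint.h>
#include <stdio.h>

/* Original-format PSCI power_state fields (per the PSCI specification). */
#define PSCI_POWER_STATE_ID_MASK	0xffffU		/* bits[15:0] */
#define PSCI_POWER_STATE_TYPE_SHIFT	16		/* 0 = retention, 1 = powerdown */
#define PSCI_POWER_STATE_TYPE_MASK	(0x1U << PSCI_POWER_STATE_TYPE_SHIFT)
#define PSCI_POWER_STATE_AFFL_SHIFT	24		/* affinity level, bits[25:24] */
#define PSCI_POWER_STATE_AFFL_MASK	(0x3U << PSCI_POWER_STATE_AFFL_SHIFT)
#define PSCI_POWER_STATE_VALID_MASK	(PSCI_POWER_STATE_ID_MASK | \
					 PSCI_POWER_STATE_TYPE_MASK | \
					 PSCI_POWER_STATE_AFFL_MASK)

int main(void)
{
	uint32_t state = 0x01000022;	/* hypothetical LPI address value */

	/* The same shape of check the kernel performs: no reserved bits. */
	if (state & ~PSCI_POWER_STATE_VALID_MASK) {
		printf("Invalid PSCI power state %#x\n", state);
		return 1;
	}
	printf("id %#x, %s, affinity level %u\n",
	       state & PSCI_POWER_STATE_ID_MASK,
	       state & PSCI_POWER_STATE_TYPE_MASK ? "powerdown" : "retention",
	       (state & PSCI_POWER_STATE_AFFL_MASK) >> PSCI_POWER_STATE_AFFL_SHIFT);
	return 0;
}
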
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index cc23302e9d95..989b1944cb71 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -310,15 +310,15 @@ __create_page_tables:
adrp x0, idmap_pg_dir
adrp x3, __idmap_text_start // __pa(__idmap_text_start)
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_ARM64_VA_BITS_52
mrs_s x6, SYS_ID_AA64MMFR2_EL1
and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
mov x5, #52
cbnz x6, 1f
#endif
- mov x5, #VA_BITS
+ mov x5, #VA_BITS_MIN
1:
- adr_l x6, vabits_user
+ adr_l x6, vabits_actual
str x5, [x6]
dmb sy
dc ivac, x6 // Invalidate potentially stale cache line
@@ -726,14 +726,22 @@ __secondary_switched:
adr_l x0, secondary_data
ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
+ cbz x1, __secondary_too_slow
mov sp, x1
ldr x2, [x0, #CPU_BOOT_TASK]
+ cbz x2, __secondary_too_slow
msr sp_el0, x2
mov x29, #0
mov x30, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
+__secondary_too_slow:
+ wfe
+ wfi
+ b __secondary_too_slow
+ENDPROC(__secondary_too_slow)
+
/*
* The booting CPU updates the failed status @__early_cpu_boot_status,
* with MMU turned off.
@@ -774,7 +782,7 @@ ENTRY(__enable_mmu)
phys_to_ttbr x1, x1
phys_to_ttbr x2, x2
msr ttbr0_el1, x2 // load TTBR0
- offset_ttbr1 x1
+ offset_ttbr1 x1, x3
msr ttbr1_el1, x1 // load TTBR1
isb
msr sctlr_el1, x0
@@ -791,8 +799,8 @@ ENTRY(__enable_mmu)
ENDPROC(__enable_mmu)
ENTRY(__cpu_secondary_check52bitva)
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
- ldr_l x0, vabits_user
+#ifdef CONFIG_ARM64_VA_BITS_52
+ ldr_l x0, vabits_actual
cmp x0, #52
b.ne 2f
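
The hunks above replace the user-VA-only CONFIG_ARM64_USER_VA_BITS_52 with the generic CONFIG_ARM64_VA_BITS_52 and record the probed size in vabits_actual. A C sketch of what the assembly computes, assuming the LVA field sits at bits[19:16] of ID_AA64MMFR2_EL1 and VA_BITS_MIN is the compile-time default:

#include <stdint.h>
#include <stdio.h>

#define ID_AA64MMFR2_LVA_SHIFT	16	/* LVA field, bits[19:16] */
#define VA_BITS_MIN		48	/* assumed compile-time default */

/* Mirrors the head.S logic: use 52-bit VAs when ID_AA64MMFR2_EL1.LVA != 0. */
static unsigned int compute_vabits_actual(uint64_t id_aa64mmfr2)
{
	return (id_aa64mmfr2 >> ID_AA64MMFR2_LVA_SHIFT) & 0xf ? 52 : VA_BITS_MIN;
}

int main(void)
{
	printf("LVA=0 -> %u bits, LVA=1 -> %u bits\n",
	       compute_vabits_actual(0),
	       compute_vabits_actual(1ULL << ID_AA64MMFR2_LVA_SHIFT));
	return 0;
}
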
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index 2f4a2ce7264b..38bcd4d4e43b 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -22,14 +22,14 @@
* Even switching to our copied tables will cause a changed output address at
* each stage of the walk.
*/
-.macro break_before_make_ttbr_switch zero_page, page_table, tmp
+.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
phys_to_ttbr \tmp, \zero_page
msr ttbr1_el1, \tmp
isb
tlbi vmalle1
dsb nsh
phys_to_ttbr \tmp, \page_table
- offset_ttbr1 \tmp
+ offset_ttbr1 \tmp, \tmp2
msr ttbr1_el1, \tmp
isb
.endm
@@ -70,7 +70,7 @@ ENTRY(swsusp_arch_suspend_exit)
* We execute from ttbr0, change ttbr1 to our copied linear map tables
* with a break-before-make via the zero page
*/
- break_before_make_ttbr_switch x5, x0, x6
+ break_before_make_ttbr_switch x5, x0, x6, x8
mov x21, x1
mov x30, x2
@@ -101,7 +101,7 @@ ENTRY(swsusp_arch_suspend_exit)
dsb ish /* wait for PoU cleaning to finish */
/* switch to the restored kernels page tables */
- break_before_make_ttbr_switch x25, x21, x6
+ break_before_make_ttbr_switch x25, x21, x6, x8
ic ialluis
dsb ish
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 9341fcc6e809..e0a7fce0e01c 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -496,7 +496,7 @@ int swsusp_arch_resume(void)
rc = -ENOMEM;
goto out;
}
- rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+ rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END);
if (rc)
goto out;
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 708051655ad9..416f537bf614 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -62,9 +62,6 @@ out:
return default_cmdline;
}
-extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
- pgprot_t prot);
-
/*
* This routine will be executed with the kernel mapped at its default virtual
* address, and if it returns successfully, the kernel will be remapped, and
@@ -93,7 +90,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* attempt at mapping the FDT in setup_machine()
*/
early_fixmap_init();
- fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+ fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
if (!fdt)
return 0;
@@ -116,15 +113,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
/*
* OK, so we are proceeding with KASLR enabled. Calculate a suitable
* kernel image offset from the seed. Let's place the kernel in the
- * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of
+ * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
* the lower and upper quarters to avoid colliding with other
* allocations.
* Even if we could randomize at page granularity for 16k and 64k pages,
* let's always round to 2 MB so we don't interfere with the ability to
* map using contiguous PTEs
*/
- mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
- offset = BIT(VA_BITS - 3) + (seed & mask);
+ mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
+ offset = BIT(VA_BITS_MIN - 3) + (seed & mask);
/* use the top 16 bits to randomize the linear region */
memstart_offset_seed = seed >> 48;
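
A worked example of the computation above, assuming VA_BITS_MIN = 48: the mask keeps seed bits [45:21], so the offset is always 2 MB aligned and falls in [2^45, 2^45 + 2^46), the middle half of the vmalloc area.

#include <stdio.h>

#define VA_BITS_MIN	48		/* assumed configuration */
#define SZ_2M		0x200000ULL

int main(void)
{
	unsigned long long seed = 0x123456789abcdef0ULL;	/* example seed */
	unsigned long long mask, offset;

	mask = ((1ULL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
	offset = (1ULL << (VA_BITS_MIN - 3)) + (seed & mask);

	printf("mask   = %#llx\n", mask);	/* 0x3fffffe00000 */
	printf("offset = %#llx\n", offset);	/* 2 MB aligned */
	return 0;
}
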
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index ba78ee7ca990..7b08bf9499b6 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -27,6 +27,8 @@
#define FDT_PROP_INITRD_END "linux,initrd-end"
#define FDT_PROP_BOOTARGS "bootargs"
#define FDT_PROP_KASLR_SEED "kaslr-seed"
+#define FDT_PROP_RNG_SEED "rng-seed"
+#define RNG_SEED_SIZE 128
const struct kexec_file_ops * const kexec_file_loaders[] = {
&kexec_image_ops,
@@ -102,6 +104,19 @@ static int setup_dtb(struct kimage *image,
FDT_PROP_KASLR_SEED);
}
+ /* add rng-seed */
+ if (rng_is_initialized()) {
+ u8 rng_seed[RNG_SEED_SIZE];
+ get_random_bytes(rng_seed, RNG_SEED_SIZE);
+ ret = fdt_setprop(dtb, off, FDT_PROP_RNG_SEED, rng_seed,
+ RNG_SEED_SIZE);
+ if (ret)
+ goto out;
+ } else {
+ pr_notice("RNG is not initialised: omitting \"%s\" property\n",
+ FDT_PROP_RNG_SEED);
+ }
+
out:
if (ret)
return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL;
@@ -110,7 +125,8 @@ out:
}
/*
- * More space needed so that we can add initrd, bootargs and kaslr-seed.
+ * More space needed so that we can add initrd, bootargs, kaslr-seed, and
+ * rng-seed.
*/
#define DTB_EXTRA_SPACE 0x1000
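
The new rng-seed property lands in /chosen, next to kaslr-seed. A hedged sketch of verifying it in the generated blob with stock libfdt calls (fdt_path_offset() and fdt_getprop()):

#include <libfdt.h>
#include <stdio.h>

/* Sketch: check the rng-seed property in a flattened device tree blob. */
static int check_rng_seed(const void *dtb)
{
	const void *seed;
	int off, len;

	off = fdt_path_offset(dtb, "/chosen");
	if (off < 0)
		return off;

	seed = fdt_getprop(dtb, off, "rng-seed", &len);
	if (!seed) {
		fprintf(stderr, "no rng-seed property (error %d)\n", len);
		return len;
	}

	printf("rng-seed present, %d bytes\n", len);	/* expect 128 */
	return 0;
}
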
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 96e90e270042..a0b4f1bca491 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
+#include <linux/smp.h>
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@@ -157,7 +158,6 @@ armv8pmu_events_sysfs_show(struct device *dev,
return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
}
-#define ARMV8_EVENT_ATTR_RESOLVE(m) #m
#define ARMV8_EVENT_ATTR(name, config) \
PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
config, armv8pmu_events_sysfs_show)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index f674f28df663..03689c0beb34 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
+#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
@@ -38,6 +39,7 @@
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
+#include <linux/prctl.h>
#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
@@ -307,11 +309,18 @@ static void tls_thread_flush(void)
}
}
+static void flush_tagged_addr_state(void)
+{
+ if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
+ clear_thread_flag(TIF_TAGGED_ADDR);
+}
+
void flush_thread(void)
{
fpsimd_flush_thread();
tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
+ flush_tagged_addr_state();
}
void release_thread(struct task_struct *dead_task)
@@ -565,3 +574,70 @@ void arch_setup_new_exec(void)
ptrauth_thread_init_user(current);
}
+
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+/*
+ * Control the relaxed ABI allowing tagged user addresses into the kernel.
+ */
+static unsigned int tagged_addr_disabled;
+
+long set_tagged_addr_ctrl(unsigned long arg)
+{
+ if (is_compat_task())
+ return -EINVAL;
+ if (arg & ~PR_TAGGED_ADDR_ENABLE)
+ return -EINVAL;
+
+ /*
+ * Do not allow the enabling of the tagged address ABI if globally
+ * disabled via sysctl abi.tagged_addr_disabled.
+ */
+ if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
+ return -EINVAL;
+
+ update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
+
+ return 0;
+}
+
+long get_tagged_addr_ctrl(void)
+{
+ if (is_compat_task())
+ return -EINVAL;
+
+ if (test_thread_flag(TIF_TAGGED_ADDR))
+ return PR_TAGGED_ADDR_ENABLE;
+
+ return 0;
+}
+
+/*
+ * Global sysctl to disable the tagged user addresses support. This control
+ * only prevents the tagged address ABI enabling via prctl() and does not
+ * disable it for tasks that already opted in to the relaxed ABI.
+ */
+static int zero;
+static int one = 1;
+
+static struct ctl_table tagged_addr_sysctl_table[] = {
+ {
+ .procname = "tagged_addr_disabled",
+ .mode = 0644,
+ .data = &tagged_addr_disabled,
+ .maxlen = sizeof(int),
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ { }
+};
+
+static int __init tagged_addr_init(void)
+{
+ if (!register_sysctl("abi", tagged_addr_sysctl_table))
+ return -EINVAL;
+ return 0;
+}
+
+core_initcall(tagged_addr_init);
+#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
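
From user space, the relaxed ABI is opted into per thread via prctl(). A minimal sketch, assuming the uapi constants added alongside this series (PR_SET_TAGGED_ADDR_CTRL == 55, PR_TAGGED_ADDR_ENABLE == bit 0) and hardware Top Byte Ignore for EL0 accesses:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

int main(void)
{
	char *p, *tagged;

	/* Opt in; fails with EINVAL if abi.tagged_addr_disabled is set. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) {
		perror("PR_SET_TAGGED_ADDR_CTRL");
		return 1;
	}

	p = malloc(16);
	strcpy(p, "hello, tags\n");

	/*
	 * Forge a tag in the top byte: TBI makes loads/stores ignore it,
	 * and the relaxed ABI now lets syscalls accept it too.
	 */
	tagged = (char *)((unsigned long)p | (0x2aUL << 56));
	write(STDOUT_FILENO, tagged, strlen(tagged));

	free(p);
	return 0;
}
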
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 85ee7d07889e..c9f72b2665f1 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -46,6 +46,11 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
+static bool cpu_psci_cpu_can_disable(unsigned int cpu)
+{
+ return !psci_tos_resident_on(cpu);
+}
+
static int cpu_psci_cpu_disable(unsigned int cpu)
{
/* Fail early if we don't have CPU_OFF support */
@@ -105,14 +110,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
const struct cpu_operations cpu_psci_ops = {
.name = "psci",
-#ifdef CONFIG_CPU_IDLE
- .cpu_init_idle = psci_cpu_init_idle,
- .cpu_suspend = psci_cpu_suspend_enter,
-#endif
.cpu_init = cpu_psci_cpu_init,
.cpu_prepare = cpu_psci_cpu_prepare,
.cpu_boot = cpu_psci_cpu_boot,
#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_can_disable = cpu_psci_cpu_can_disable,
.cpu_disable = cpu_psci_cpu_disable,
.cpu_die = cpu_psci_cpu_die,
.cpu_kill = cpu_psci_cpu_kill,
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 9c4bad7d7131..56f664561754 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -170,9 +170,13 @@ static void __init smp_build_mpidr_hash(void)
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
- void *dt_virt = fixmap_remap_fdt(dt_phys);
+ int size;
+ void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
const char *name;
+ if (dt_virt)
+ memblock_reserve(dt_phys, size);
+
if (!dt_virt || !early_init_dt_scan(dt_virt)) {
pr_crit("\n"
"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
@@ -184,6 +188,9 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
cpu_relax();
}
+ /* Early fixups are done, map the FDT as read-only now */
+ fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
+
name = of_flat_dt_get_machine_name();
if (!name)
return;
@@ -357,6 +364,15 @@ void __init setup_arch(char **cmdline_p)
}
}
+static inline bool cpu_can_disable(unsigned int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_can_disable)
+ return cpu_ops[cpu]->cpu_can_disable(cpu);
+#endif
+ return false;
+}
+
static int __init topology_init(void)
{
int i;
@@ -366,7 +382,7 @@ static int __init topology_init(void)
for_each_possible_cpu(i) {
struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
- cpu->hotpluggable = 1;
+ cpu->hotpluggable = cpu_can_disable(i);
register_cpu(cpu, i);
}
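
One visible effect of this change: register_cpu() only exposes the sysfs 'online' control for CPUs reported as hotpluggable, so a CPU that the Trusted OS is resident on loses it. A quick user-space probe, assuming the standard sysfs layout:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%d/online", cpu);
		printf("cpu%d: %s\n", cpu,
		       access(path, F_OK) ? "not hotpluggable" : "hotpluggable");
	}
	return 0;
}
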
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 018a33e01b0e..dc9fe879c279 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -123,7 +123,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
* time out.
*/
wait_for_completion_timeout(&cpu_running,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(5000));
if (!cpu_online(cpu)) {
pr_crit("CPU%u: failed to come online\n", cpu);
@@ -136,6 +136,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
secondary_data.task = NULL;
secondary_data.stack = NULL;
+ __flush_dcache_area(&secondary_data, sizeof(secondary_data));
status = READ_ONCE(secondary_data.status);
if (ret && status) {
@@ -146,6 +147,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
default:
pr_err("CPU%u: failed in unknown state : 0x%lx\n",
cpu, status);
+ cpus_stuck_in_kernel++;
break;
case CPU_KILL_ME:
if (!op_cpu_kill(cpu)) {
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 0825c4a856e3..fa9528dfd0ce 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -14,250 +14,13 @@
#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
-#include <linux/node.h>
-#include <linux/nodemask.h>
-#include <linux/of.h>
-#include <linux/sched.h>
-#include <linux/sched/topology.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/string.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
-static int __init get_cpu_for_node(struct device_node *node)
-{
- struct device_node *cpu_node;
- int cpu;
-
- cpu_node = of_parse_phandle(node, "cpu", 0);
- if (!cpu_node)
- return -1;
-
- cpu = of_cpu_node_to_id(cpu_node);
- if (cpu >= 0)
- topology_parse_cpu_capacity(cpu_node, cpu);
- else
- pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
-
- of_node_put(cpu_node);
- return cpu;
-}
-
-static int __init parse_core(struct device_node *core, int package_id,
- int core_id)
-{
- char name[10];
- bool leaf = true;
- int i = 0;
- int cpu;
- struct device_node *t;
-
- do {
- snprintf(name, sizeof(name), "thread%d", i);
- t = of_get_child_by_name(core, name);
- if (t) {
- leaf = false;
- cpu = get_cpu_for_node(t);
- if (cpu >= 0) {
- cpu_topology[cpu].package_id = package_id;
- cpu_topology[cpu].core_id = core_id;
- cpu_topology[cpu].thread_id = i;
- } else {
- pr_err("%pOF: Can't get CPU for thread\n",
- t);
- of_node_put(t);
- return -EINVAL;
- }
- of_node_put(t);
- }
- i++;
- } while (t);
-
- cpu = get_cpu_for_node(core);
- if (cpu >= 0) {
- if (!leaf) {
- pr_err("%pOF: Core has both threads and CPU\n",
- core);
- return -EINVAL;
- }
-
- cpu_topology[cpu].package_id = package_id;
- cpu_topology[cpu].core_id = core_id;
- } else if (leaf) {
- pr_err("%pOF: Can't get CPU for leaf core\n", core);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __init parse_cluster(struct device_node *cluster, int depth)
-{
- char name[10];
- bool leaf = true;
- bool has_cores = false;
- struct device_node *c;
- static int package_id __initdata;
- int core_id = 0;
- int i, ret;
-
- /*
- * First check for child clusters; we currently ignore any
- * information about the nesting of clusters and present the
- * scheduler with a flat list of them.
- */
- i = 0;
- do {
- snprintf(name, sizeof(name), "cluster%d", i);
- c = of_get_child_by_name(cluster, name);
- if (c) {
- leaf = false;
- ret = parse_cluster(c, depth + 1);
- of_node_put(c);
- if (ret != 0)
- return ret;
- }
- i++;
- } while (c);
-
- /* Now check for cores */
- i = 0;
- do {
- snprintf(name, sizeof(name), "core%d", i);
- c = of_get_child_by_name(cluster, name);
- if (c) {
- has_cores = true;
-
- if (depth == 0) {
- pr_err("%pOF: cpu-map children should be clusters\n",
- c);
- of_node_put(c);
- return -EINVAL;
- }
-
- if (leaf) {
- ret = parse_core(c, package_id, core_id++);
- } else {
- pr_err("%pOF: Non-leaf cluster with core %s\n",
- cluster, name);
- ret = -EINVAL;
- }
-
- of_node_put(c);
- if (ret != 0)
- return ret;
- }
- i++;
- } while (c);
-
- if (leaf && !has_cores)
- pr_warn("%pOF: empty cluster\n", cluster);
-
- if (leaf)
- package_id++;
-
- return 0;
-}
-
-static int __init parse_dt_topology(void)
-{
- struct device_node *cn, *map;
- int ret = 0;
- int cpu;
-
- cn = of_find_node_by_path("/cpus");
- if (!cn) {
- pr_err("No CPU information found in DT\n");
- return 0;
- }
-
- /*
- * When topology is provided cpu-map is essentially a root
- * cluster with restricted subnodes.
- */
- map = of_get_child_by_name(cn, "cpu-map");
- if (!map)
- goto out;
-
- ret = parse_cluster(map, 0);
- if (ret != 0)
- goto out_map;
-
- topology_normalize_cpu_scale();
-
- /*
- * Check that all cores are in the topology; the SMP code will
- * only mark cores described in the DT as possible.
- */
- for_each_possible_cpu(cpu)
- if (cpu_topology[cpu].package_id == -1)
- ret = -EINVAL;
-
-out_map:
- of_node_put(map);
-out:
- of_node_put(cn);
- return ret;
-}
-
-/*
- * cpu topology table
- */
-struct cpu_topology cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
-
-const struct cpumask *cpu_coregroup_mask(int cpu)
-{
- const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
-
- /* Find the smaller of NUMA, core or LLC siblings */
- if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
- /* not numa in package, lets use the package siblings */
- core_mask = &cpu_topology[cpu].core_sibling;
- }
- if (cpu_topology[cpu].llc_id != -1) {
- if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
- core_mask = &cpu_topology[cpu].llc_sibling;
- }
-
- return core_mask;
-}
-
-static void update_siblings_masks(unsigned int cpuid)
-{
- struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
- int cpu;
-
- /* update core and thread sibling masks */
- for_each_online_cpu(cpu) {
- cpu_topo = &cpu_topology[cpu];
-
- if (cpuid_topo->llc_id == cpu_topo->llc_id) {
- cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
- cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
- }
-
- if (cpuid_topo->package_id != cpu_topo->package_id)
- continue;
-
- cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
- cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
-
- if (cpuid_topo->core_id != cpu_topo->core_id)
- continue;
-
- cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
- cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
- }
-}
-
void store_cpu_topology(unsigned int cpuid)
{
struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
@@ -296,60 +59,31 @@ topology_populated:
update_siblings_masks(cpuid);
}
-static void clear_cpu_topology(int cpu)
-{
- struct cpu_topology *cpu_topo = &cpu_topology[cpu];
-
- cpumask_clear(&cpu_topo->llc_sibling);
- cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
-
- cpumask_clear(&cpu_topo->core_sibling);
- cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
- cpumask_clear(&cpu_topo->thread_sibling);
- cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
-}
-
-static void __init reset_cpu_topology(void)
-{
- unsigned int cpu;
-
- for_each_possible_cpu(cpu) {
- struct cpu_topology *cpu_topo = &cpu_topology[cpu];
-
- cpu_topo->thread_id = -1;
- cpu_topo->core_id = 0;
- cpu_topo->package_id = -1;
- cpu_topo->llc_id = -1;
-
- clear_cpu_topology(cpu);
- }
-}
-
-void remove_cpu_topology(unsigned int cpu)
+#ifdef CONFIG_ACPI
+static bool __init acpi_cpu_is_threaded(int cpu)
{
- int sibling;
+ int is_threaded = acpi_pptt_cpu_is_thread(cpu);
- for_each_cpu(sibling, topology_core_cpumask(cpu))
- cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
- for_each_cpu(sibling, topology_sibling_cpumask(cpu))
- cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
- for_each_cpu(sibling, topology_llc_cpumask(cpu))
- cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
+ /*
+ * if the PPTT doesn't have thread information, assume a homogeneous
+ * machine and return the current CPU's thread state.
+ */
+ if (is_threaded < 0)
+ is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
- clear_cpu_topology(cpu);
+ return !!is_threaded;
}
-#ifdef CONFIG_ACPI
/*
* Propagate the topology information of the processor_topology_node tree to the
* cpu_topology array.
*/
-static int __init parse_acpi_topology(void)
+int __init parse_acpi_topology(void)
{
- bool is_threaded;
int cpu, topology_id;
- is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
+ if (acpi_disabled)
+ return 0;
for_each_possible_cpu(cpu) {
int i, cache_id;
@@ -358,7 +92,7 @@ static int __init parse_acpi_topology(void)
if (topology_id < 0)
return topology_id;
- if (is_threaded) {
+ if (acpi_cpu_is_threaded(cpu)) {
cpu_topology[cpu].thread_id = topology_id;
topology_id = find_acpi_cpu_topology(cpu, 1);
cpu_topology[cpu].core_id = topology_id;
@@ -384,24 +118,6 @@ static int __init parse_acpi_topology(void)
return 0;
}
-
-#else
-static inline int __init parse_acpi_topology(void)
-{
- return -EINVAL;
-}
#endif
-void __init init_cpu_topology(void)
-{
- reset_cpu_topology();
- /*
- * Discard anything that was parsed if we hit an error so we
- * don't use partial information.
- */
- if (!acpi_disabled && parse_acpi_topology())
- reset_cpu_topology();
- else if (of_have_populated_dt() && parse_dt_topology())
- reset_cpu_topology();
-}
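
For the MPIDR fallback in acpi_cpu_is_threaded() above, MT is bit 24 of MPIDR_EL1: when set, affinity level 0 enumerates hardware threads rather than cores. A decoding sketch with an example register value (not read from hardware):

#include <stdint.h>
#include <stdio.h>

#define MPIDR_MT_BITMASK	(1ULL << 24)	/* Aff0 enumerates threads */

int main(void)
{
	uint64_t mpidr = 0x81000100ULL;	/* example: MT set, Aff1 = 1 */

	printf("Aff0=%u Aff1=%u Aff2=%u MT=%d\n",
	       (unsigned int)(mpidr & 0xff),
	       (unsigned int)((mpidr >> 8) & 0xff),
	       (unsigned int)((mpidr >> 16) & 0xff),
	       mpidr & MPIDR_MT_BITMASK ? 1 : 0);
	return 0;
}
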