author     Linus Torvalds <torvalds@linux-foundation.org>   2017-05-12 10:04:09 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-05-12 10:04:09 -0700
commit     dc2a24816637ff6c60f08c4245aba01c6e9b6a79
tree       15a4a6462323beb7aed6eb9bb44192439dd0452a /arch
parent     ac3c4aa248c5b5390c40fad1ceb0a15a53f57a36
parent     5a61ef74f269f2573f48fa53607a8911216c3326
Merge tag 'powerpc-4.12-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull more powerpc updates from Michael Ellerman:
"The change to the Linux page table geometry was delayed for more
testing with 16G pages, and there's the new CPU features stuff which
just needed one more polish before going in. Plus a few changes from
Scott which came in a bit late. And then various fixes, mostly minor.
Summary highlights:
- rework the Linux page table geometry to lower memory usage on
64-bit Book3S (IBM chips) using the Hash MMU.
- support for a new device tree binding for discovering CPU features
on future firmwares.
- Freescale updates from Scott:
"Includes a fix for a powerpc/next mm regression on 64e, a fix for
a kernel hang on 64e when using a debugger inside a relocated
kernel, a qman fix, and misc qe improvements."
Thanks to: Christophe Leroy, Gavin Shan, Horia Geantă, LiuHailong,
Nicholas Piggin, Roy Pledge, Scott Wood, Valentin Longchamp"
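The new CPU-features binding is implemented by dt_cpu_ftrs.c in the diff below. Each feature node may carry a "hwcap-bit-nr" property, which the kernel folds into the two HWCAP words it exposes to user space through the ELF auxiliary vector. A minimal user-space sketch of that word/bit split, mirroring feat_enable() further down; the variable names here are illustrative stand-ins rather than the kernel's own:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t user_features  = 0;   /* stand-in for cpu_user_features  (AT_HWCAP)  */
	uint32_t user_features2 = 0;   /* stand-in for cpu_user_features2 (AT_HWCAP2) */
	uint32_t hwcap_bit_nr   = 33;  /* example value read from a feature node */

	uint32_t word = hwcap_bit_nr / 32;
	uint32_t bit  = hwcap_bit_nr % 32;

	if (word == 0)
		user_features |= 1U << bit;
	else if (word == 1)
		user_features2 |= 1U << bit;
	else
		fprintf(stderr, "bit %u has no hwcap word to land in\n", hwcap_bit_nr);

	printf("AT_HWCAP=0x%08x AT_HWCAP2=0x%08x\n", user_features, user_features2);
	return 0;
}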
* tag 'powerpc-4.12-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/64s: Support new device tree binding for discovering CPU features
powerpc: Don't print cpu_spec->cpu_name if it's NULL
of/fdt: introduce of_scan_flat_dt_subnodes and of_get_flat_dt_phandle
powerpc/64s: Fix unnecessary machine check handler relocation branch
powerpc/mm/book3s/64: Rework page table geometry for lower memory usage
powerpc: Fix distclean with Makefile.postlink
powerpc/64e: Don't place the stack beyond TASK_SIZE
powerpc/powernv: Block PCI config access on BCM5718 during EEH recovery
powerpc/8xx: Adding support of IRQ in MPC8xx GPIO
soc/fsl/qbman: Disable IRQs for deferred QBMan work
soc/fsl/qe: add EXPORT_SYMBOL for the 2 qe_tdm functions
soc/fsl/qe: only apply QE_General4 workaround on affected SoCs
soc/fsl/qe: round brg_freq to 1kHz granularity
soc/fsl/qe: get rid of immrbar_virt_to_phys()
net: ethernet: ucc_geth: fix MEM_PART_MURAM mode
powerpc/64e: Fix hang when debugging programs with relocated kernel
Diffstat (limited to 'arch')
 arch/powerpc/Kconfig                           |   16
 arch/powerpc/Makefile.postlink                 |    2
 arch/powerpc/include/asm/book3s/64/hash-64k.h  |    6
 arch/powerpc/include/asm/cpm1.h                |    2
 arch/powerpc/include/asm/cpu_has_feature.h     |    6
 arch/powerpc/include/asm/cputable.h            |    2
 arch/powerpc/include/asm/dt_cpu_ftrs.h         |   26
 arch/powerpc/include/asm/processor.h           |    5
 arch/powerpc/include/asm/reg.h                 |    1
 arch/powerpc/include/uapi/asm/cputable.h       |    7
 arch/powerpc/kernel/Makefile                   |    1
 arch/powerpc/kernel/cputable.c                 |   37
 arch/powerpc/kernel/dt_cpu_ftrs.c              | 1031
 arch/powerpc/kernel/exceptions-64e.S           |   12
 arch/powerpc/kernel/exceptions-64s.S           |    4
 arch/powerpc/kernel/prom.c                     |   29
 arch/powerpc/kernel/setup-common.c             |    2
 arch/powerpc/kernel/setup_64.c                 |   10
 arch/powerpc/platforms/powernv/eeh-powernv.c   |    3
 arch/powerpc/sysdev/cpm1.c                     |   25
 20 files changed, 1203 insertions(+), 24 deletions(-)
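Before the diff itself, a rough sizing sketch of the hash-64k page-table geometry rework visible in the arch/powerpc/include/asm/book3s/64/hash-64k.h hunk below: with 64K pages (a 16-bit page offset) the PTE/PMD/PUD/PGD index sizes move from 8/5/5/15 to 8/10/7/8, covering the same 49 bits of virtual address while shrinking the top-level table. The 8-byte upper-level entry size below is an assumption for illustration only; the real entry and fragment sizes are defined elsewhere in the kernel.

#include <stdio.h>

/* Index sizes taken from the hash-64k.h hunk; 64K pages give a 16-bit page offset. */
static void show(const char *tag, int pte, int pmd, int pud, int pgd)
{
	const int page_shift  = 16;  /* 64K pages */
	const int entry_bytes = 8;   /* assumed upper-level entry size, illustration only */

	printf("%s: VA bits=%d  PGD=%lu bytes  PUD=%lu bytes  PMD=%lu bytes\n",
	       tag, page_shift + pte + pmd + pud + pgd,
	       (1UL << pgd) * entry_bytes,
	       (1UL << pud) * entry_bytes,
	       (1UL << pmd) * entry_bytes);
}

int main(void)
{
	show("old", 8, 5, 5, 15);  /* 49 VA bits, 256 KB top-level table per process */
	show("new", 8, 10, 7, 8);  /* 49 VA bits,   2 KB top-level table per process */
	return 0;
}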
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 964da1891ea9..f7c8f9972f61 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -380,6 +380,22 @@ source "arch/powerpc/platforms/Kconfig" menu "Kernel options" +config PPC_DT_CPU_FTRS + bool "Device-tree based CPU feature discovery & setup" + depends on PPC_BOOK3S_64 + default n + help + This enables code to use a new device tree binding for describing CPU + compatibility and features. Saying Y here will attempt to use the new + binding if the firmware provides it. Currently only the skiboot + firmware provides this binding. + If you're not sure say Y. + +config PPC_CPUFEATURES_ENABLE_UNKNOWN + bool "cpufeatures pass through unknown features to guest/userspace" + depends on PPC_DT_CPU_FTRS + default y + config HIGHMEM bool "High memory support" depends on PPC32 diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink index 3c22d64b2de9..eccfcc88afae 100644 --- a/arch/powerpc/Makefile.postlink +++ b/arch/powerpc/Makefile.postlink @@ -7,7 +7,7 @@ PHONY := __archpost __archpost: -include include/config/auto.conf +-include include/config/auto.conf include scripts/Kbuild.include quiet_cmd_relocs_check = CHKREL $@ diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index 214219dff87c..9732837aaae8 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -2,9 +2,9 @@ #define _ASM_POWERPC_BOOK3S_64_HASH_64K_H #define H_PTE_INDEX_SIZE 8 -#define H_PMD_INDEX_SIZE 5 -#define H_PUD_INDEX_SIZE 5 -#define H_PGD_INDEX_SIZE 15 +#define H_PMD_INDEX_SIZE 10 +#define H_PUD_INDEX_SIZE 7 +#define H_PGD_INDEX_SIZE 8 /* * 64k aligned address free up few of the lower bits of RPN for us diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h index 8ee4211ca0c6..14ad37865000 100644 --- a/arch/powerpc/include/asm/cpm1.h +++ b/arch/powerpc/include/asm/cpm1.h @@ -560,6 +560,8 @@ typedef struct risc_timer_pram { #define CPM_PIN_SECONDARY 2 #define CPM_PIN_GPIO 4 #define CPM_PIN_OPENDRAIN 8 +#define CPM_PIN_FALLEDGE 16 +#define CPM_PIN_ANYEDGE 0 enum cpm_port { CPM_PORTA, diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h index 6e834caa3720..0d1df02bf99d 100644 --- a/arch/powerpc/include/asm/cpu_has_feature.h +++ b/arch/powerpc/include/asm/cpu_has_feature.h @@ -1,5 +1,5 @@ -#ifndef __ASM_POWERPC_CPUFEATURES_H -#define __ASM_POWERPC_CPUFEATURES_H +#ifndef __ASM_POWERPC_CPU_HAS_FEATURE_H +#define __ASM_POWERPC_CPU_HAS_FEATURE_H #ifndef __ASSEMBLY__ @@ -52,4 +52,4 @@ static inline bool cpu_has_feature(unsigned long feature) #endif #endif /* __ASSEMBLY__ */ -#endif /* __ASM_POWERPC_CPUFEATURE_H */ +#endif /* __ASM_POWERPC_CPU_HAS_FEATURE_H */ diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 1f6847b107e4..c2d509584a98 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -118,7 +118,9 @@ extern struct cpu_spec *cur_cpu_spec; extern unsigned int __start___ftr_fixup, __stop___ftr_fixup; +extern void set_cur_cpu_spec(struct cpu_spec *s); extern struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr); +extern void identify_cpu_name(unsigned int pvr); extern void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end); diff --git a/arch/powerpc/include/asm/dt_cpu_ftrs.h b/arch/powerpc/include/asm/dt_cpu_ftrs.h new file mode 100644 index 
000000000000..7a34fc11bf63 --- /dev/null +++ b/arch/powerpc/include/asm/dt_cpu_ftrs.h @@ -0,0 +1,26 @@ +#ifndef __ASM_POWERPC_DT_CPU_FTRS_H +#define __ASM_POWERPC_DT_CPU_FTRS_H + +/* + * Copyright 2017, IBM Corporation + * cpufeatures is the new way to discover CPU features with /cpus/features + * devicetree. This supersedes PVR based discovery ("cputable"), and older + * device tree feature advertisement. + */ + +#include <linux/types.h> +#include <asm/asm-compat.h> +#include <asm/feature-fixups.h> +#include <uapi/asm/cputable.h> + +#ifdef CONFIG_PPC_DT_CPU_FTRS +bool dt_cpu_ftrs_init(void *fdt); +void dt_cpu_ftrs_scan(void); +bool dt_cpu_ftrs_in_use(void); +#else +static inline bool dt_cpu_ftrs_init(void *fdt) { return false; } +static inline void dt_cpu_ftrs_scan(void) { } +static inline bool dt_cpu_ftrs_in_use(void) { return false; } +#endif + +#endif /* __ASM_POWERPC_DT_CPU_FTRS_H */ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index a4b1d8d6b793..a2123f291ab0 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -151,8 +151,13 @@ void release_thread(struct task_struct *); #ifdef __powerpc64__ +#ifdef CONFIG_PPC_BOOK3S_64 /* Limit stack to 128TB */ #define STACK_TOP_USER64 TASK_SIZE_128TB +#else +#define STACK_TOP_USER64 TASK_SIZE_USER64 +#endif + #define STACK_TOP_USER32 TASK_SIZE_USER32 #define STACK_TOP (is_32bit_task() ? \ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index d4f653c9259a..7e50e47375d6 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1229,6 +1229,7 @@ #define PVR_POWER8E 0x004B #define PVR_POWER8NVL 0x004C #define PVR_POWER8 0x004D +#define PVR_POWER9 0x004E #define PVR_BE 0x0070 #define PVR_PA6T 0x0090 diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h index f63c96cd3608..3e7ce86d5c13 100644 --- a/arch/powerpc/include/uapi/asm/cputable.h +++ b/arch/powerpc/include/uapi/asm/cputable.h @@ -47,4 +47,11 @@ #define PPC_FEATURE2_ARCH_3_00 0x00800000 /* ISA 3.00 */ #define PPC_FEATURE2_HAS_IEEE128 0x00400000 /* VSX IEEE Binary Float 128-bit */ +/* + * IMPORTANT! + * All future PPC_FEATURE definitions should be allocated in cooperation with + * OPAL / skiboot firmware, in accordance with the ibm,powerpc-cpu-features + * device tree binding. 
+ */ + #endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index b9db46ae545b..e132902e1f14 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -56,6 +56,7 @@ obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y) obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o obj-$(CONFIG_RTAS_PROC) += rtas-proc.o +obj-$(CONFIG_PPC_DT_CPU_FTRS) += dt_cpu_ftrs.o obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \ eeh_driver.o eeh_event.o eeh_sysfs.o obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index e79b9daa873c..9b3e88b1a9c8 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -23,7 +23,9 @@ #include <asm/mmu.h> #include <asm/setup.h> -struct cpu_spec* cur_cpu_spec = NULL; +static struct cpu_spec the_cpu_spec __read_mostly; + +struct cpu_spec* cur_cpu_spec __read_mostly = NULL; EXPORT_SYMBOL(cur_cpu_spec); /* The platform string corresponding to the real PVR */ @@ -2179,7 +2181,15 @@ static struct cpu_spec __initdata cpu_specs[] = { #endif /* CONFIG_E500 */ }; -static struct cpu_spec the_cpu_spec; +void __init set_cur_cpu_spec(struct cpu_spec *s) +{ + struct cpu_spec *t = &the_cpu_spec; + + t = PTRRELOC(t); + *t = *s; + + *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec; +} static struct cpu_spec * __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) @@ -2266,6 +2276,29 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) return NULL; } +/* + * Used by cpufeatures to get the name for CPUs with a PVR table. + * If they don't hae a PVR table, cpufeatures gets the name from + * cpu device-tree node. + */ +void __init identify_cpu_name(unsigned int pvr) +{ + struct cpu_spec *s = cpu_specs; + struct cpu_spec *t = &the_cpu_spec; + int i; + + s = PTRRELOC(s); + t = PTRRELOC(t); + + for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) { + if ((pvr & s->pvr_mask) == s->pvr_value) { + t->cpu_name = s->cpu_name; + return; + } + } +} + + #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS] = { [0 ... NUM_CPU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c new file mode 100644 index 000000000000..fcc7588a96d6 --- /dev/null +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -0,0 +1,1031 @@ +/* + * Copyright 2017, Nicholas Piggin, IBM Corporation + * Licensed under GPLv2. 
+ */ + +#define pr_fmt(fmt) "dt-cpu-ftrs: " fmt + +#include <linux/export.h> +#include <linux/init.h> +#include <linux/jump_label.h> +#include <linux/memblock.h> +#include <linux/printk.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/threads.h> + +#include <asm/cputable.h> +#include <asm/dt_cpu_ftrs.h> +#include <asm/mmu.h> +#include <asm/oprofile_impl.h> +#include <asm/prom.h> +#include <asm/setup.h> + + +/* Device-tree visible constants follow */ +#define ISA_V2_07B 2070 +#define ISA_V3_0B 3000 + +#define USABLE_PR (1U << 0) +#define USABLE_OS (1U << 1) +#define USABLE_HV (1U << 2) + +#define HV_SUPPORT_HFSCR (1U << 0) +#define OS_SUPPORT_FSCR (1U << 0) + +/* For parsing, we define all bits set as "NONE" case */ +#define HV_SUPPORT_NONE 0xffffffffU +#define OS_SUPPORT_NONE 0xffffffffU + +struct dt_cpu_feature { + const char *name; + uint32_t isa; + uint32_t usable_privilege; + uint32_t hv_support; + uint32_t os_support; + uint32_t hfscr_bit_nr; + uint32_t fscr_bit_nr; + uint32_t hwcap_bit_nr; + /* fdt parsing */ + unsigned long node; + int enabled; + int disabled; +}; + +#define CPU_FTRS_BASE \ + (CPU_FTR_USE_TB | \ + CPU_FTR_LWSYNC | \ + CPU_FTR_FPU_UNAVAILABLE |\ + CPU_FTR_NODSISRALIGN |\ + CPU_FTR_NOEXECUTE |\ + CPU_FTR_COHERENT_ICACHE | \ + CPU_FTR_STCX_CHECKS_ADDRESS |\ + CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ + CPU_FTR_DAWR | \ + CPU_FTR_ARCH_206 |\ + CPU_FTR_ARCH_207S) + +#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8) + +#define COMMON_USER_BASE (PPC_FEATURE_32 | PPC_FEATURE_64 | \ + PPC_FEATURE_ARCH_2_06 |\ + PPC_FEATURE_ICACHE_SNOOP) +#define COMMON_USER2_BASE (PPC_FEATURE2_ARCH_2_07 | \ + PPC_FEATURE2_ISEL) +/* + * Set up the base CPU + */ + +extern void __flush_tlb_power8(unsigned int action); +extern void __flush_tlb_power9(unsigned int action); +extern long __machine_check_early_realmode_p8(struct pt_regs *regs); +extern long __machine_check_early_realmode_p9(struct pt_regs *regs); + +static int hv_mode; + +static struct { + u64 lpcr; + u64 hfscr; + u64 fscr; +} system_registers; + +static void (*init_pmu_registers)(void); + +static void cpufeatures_flush_tlb(void) +{ + unsigned long rb; + unsigned int i, num_sets; + + /* + * This is a temporary measure to keep equivalent TLB flush as the + * cputable based setup code. + */ + switch (PVR_VER(mfspr(SPRN_PVR))) { + case PVR_POWER8: + case PVR_POWER8E: + case PVR_POWER8NVL: + num_sets = POWER8_TLB_SETS; + break; + case PVR_POWER9: + num_sets = POWER9_TLB_SETS_HASH; + break; + default: + num_sets = 1; + pr_err("unknown CPU version for boot TLB flush\n"); + break; + } + + asm volatile("ptesync" : : : "memory"); + rb = TLBIEL_INVAL_SET; + for (i = 0; i < num_sets; i++) { + asm volatile("tlbiel %0" : : "r" (rb)); + rb += 1 << TLBIEL_INVAL_SET_SHIFT; + } + asm volatile("ptesync" : : : "memory"); +} + +static void __restore_cpu_cpufeatures(void) +{ + /* + * LPCR is restored by the power on engine already. It can be changed + * after early init e.g., by radix enable, and we have no unified API + * for saving and restoring such SPRs. + * + * This ->restore hook should really be removed from idle and register + * restore moved directly into the idle restore code, because this code + * doesn't know how idle is implemented or what it needs restored here. + * + * The best we can do to accommodate secondary boot and idle restore + * for now is "or" LPCR with existing. 
+ */ + + mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR)); + if (hv_mode) { + mtspr(SPRN_LPID, 0); + mtspr(SPRN_HFSCR, system_registers.hfscr); + } + mtspr(SPRN_FSCR, system_registers.fscr); + + if (init_pmu_registers) + init_pmu_registers(); + + cpufeatures_flush_tlb(); +} + +static char dt_cpu_name[64]; + +static struct cpu_spec __initdata base_cpu_spec = { + .cpu_name = NULL, + .cpu_features = CPU_FTRS_BASE, + .cpu_user_features = COMMON_USER_BASE, + .cpu_user_features2 = COMMON_USER2_BASE, + .mmu_features = 0, + .icache_bsize = 32, /* minimum block size, fixed by */ + .dcache_bsize = 32, /* cache info init. */ + .num_pmcs = 0, + .pmc_type = PPC_PMC_DEFAULT, + .oprofile_cpu_type = NULL, + .oprofile_type = PPC_OPROFILE_INVALID, + .cpu_setup = NULL, + .cpu_restore = __restore_cpu_cpufeatures, + .flush_tlb = NULL, + .machine_check_early = NULL, + .platform = NULL, +}; + +static void __init cpufeatures_setup_cpu(void) +{ + set_cur_cpu_spec(&base_cpu_spec); + + cur_cpu_spec->pvr_mask = -1; + cur_cpu_spec->pvr_value = mfspr(SPRN_PVR); + + /* Initialize the base environment -- clear FSCR/HFSCR. */ + hv_mode = !!(mfmsr() & MSR_HV); + if (hv_mode) { + /* CPU_FTR_HVMODE is used early in PACA setup */ + cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE; + mtspr(SPRN_HFSCR, 0); + } + mtspr(SPRN_FSCR, 0); + + /* + * LPCR does not get cleared, to match behaviour with secondaries + * in __restore_cpu_cpufeatures. Once the idle code is fixed, this + * could clear LPCR too. + */ +} + +static int __init feat_try_enable_unknown(struct dt_cpu_feature *f) +{ + if (f->hv_support == HV_SUPPORT_NONE) { + } else if (f->hv_support & HV_SUPPORT_HFSCR) { + u64 hfscr = mfspr(SPRN_HFSCR); + hfscr |= 1UL << f->hfscr_bit_nr; + mtspr(SPRN_HFSCR, hfscr); + } else { + /* Does not have a known recipe */ + return 0; + } + + if (f->os_support == OS_SUPPORT_NONE) { + } else if (f->os_support & OS_SUPPORT_FSCR) { + u64 fscr = mfspr(SPRN_FSCR); + fscr |= 1UL << f->fscr_bit_nr; + mtspr(SPRN_FSCR, fscr); + } else { + /* Does not have a known recipe */ + return 0; + } + + if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) { + uint32_t word = f->hwcap_bit_nr / 32; + uint32_t bit = f->hwcap_bit_nr % 32; + + if (word == 0) + cur_cpu_spec->cpu_user_features |= 1U << bit; + else if (word == 1) + cur_cpu_spec->cpu_user_features2 |= 1U << bit; + else + pr_err("%s could not advertise to user (no hwcap bits)\n", f->name); + } + + return 1; +} + +static int __init feat_enable(struct dt_cpu_feature *f) +{ + if (f->hv_support != HV_SUPPORT_NONE) { + if (f->hfscr_bit_nr != -1) { + u64 hfscr = mfspr(SPRN_HFSCR); + hfscr |= 1UL << f->hfscr_bit_nr; + mtspr(SPRN_HFSCR, hfscr); + } + } + + if (f->os_support != OS_SUPPORT_NONE) { + if (f->fscr_bit_nr != -1) { + u64 fscr = mfspr(SPRN_FSCR); + fscr |= 1UL << f->fscr_bit_nr; + mtspr(SPRN_FSCR, fscr); + } + } + + if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) { + uint32_t word = f->hwcap_bit_nr / 32; + uint32_t bit = f->hwcap_bit_nr % 32; + + if (word == 0) + cur_cpu_spec->cpu_user_features |= 1U << bit; + else if (word == 1) + cur_cpu_spec->cpu_user_features2 |= 1U << bit; + else + pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name); + } + + return 1; +} + +static int __init feat_disable(struct dt_cpu_feature *f) +{ + return 0; +} + +static int __init feat_enable_hv(struct dt_cpu_feature *f) +{ + u64 lpcr; + + if (!hv_mode) { + pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. 
Ignoring.\n"); + return 0; + } + + mtspr(SPRN_LPID, 0); + + lpcr = mfspr(SPRN_LPCR); + lpcr &= ~LPCR_LPES0; /* HV external interrupts */ + mtspr(SPRN_LPCR, lpcr); + + cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE; + + return 1; +} + +static int __init feat_enable_le(struct dt_cpu_feature *f) +{ + cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE; + return 1; +} + +static int __init feat_enable_smt(struct dt_cpu_feature *f) +{ + cur_cpu_spec->cpu_features |= CPU_FTR_SMT; + cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT; + return 1; +} + +static int __init feat_enable_idle_nap(struct dt_cpu_feature *f) +{ + u64 lpcr; + + /* Set PECE wakeup modes for ISA 207 */ + lpcr = mfspr(SPRN_LPCR); + lpcr |= LPCR_PECE0; + lpcr |= LPCR_PECE1; + lpcr |= LPCR_PECE2; + mtspr(SPRN_LPCR, lpcr); + + return 1; +} + +static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f) +{ + cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN; + + return 1; +} + +static int __init feat_enable_idle_stop(struct dt_cpu_feature *f) +{ + u64 lpcr; + + /* Set PECE wakeup modes for ISAv3.0B */ + lpcr = mfspr(SPRN_LPCR); + lpcr |= LPCR_PECE0; + lpcr |= LPCR_PECE1; + lpcr |= LPCR_PECE2; + mtspr(SPRN_LPCR, lpcr); + + return 1; +} + +static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f) +{ + u64 lpcr; + + lpcr = mfspr(SPRN_LPCR); + lpcr &= ~LPCR_ISL; + + /* VRMASD */ + lpcr |= LPCR_VPM0; + lpcr &= ~LPCR_VPM1; + lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */ + mtspr(SPRN_LPCR, lpcr); + + cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE; + cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU; + + return 1; +} + +static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f) +{ + u64 lpcr; + + lpcr = mfspr(SPRN_LPCR); + lpcr &= ~LPCR_ISL; + mtspr(SPRN_LPCR, lpcr); + + cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE; + cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU; + + return 1; +} + + +static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f) +{ +#ifdef CONFIG_PPC_RADIX_MMU + cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX; + cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE; + cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU; + + return 1; +#endif + return 0; +} + +static int __init feat_enable_dscr(struct dt_cpu_feature *f) +{ + u64 lpcr; + + feat_enable(f); + + lpcr = mfspr(SPRN_LPCR); + lpcr &= ~LPCR_DPFD; + lpcr |= (4UL << LPCR_DPFD_SH); + mtspr(SPRN_LPCR, lpcr); + + return 1; +} + +static void hfscr_pmu_enable(void) +{ + u64 hfscr = mfspr(SPRN_HFSCR); + hfscr |= PPC_BIT(60); + mtspr(SPRN_HFSCR, hfscr); +} + +static void init_pmu_power8(void) +{ + if (hv_mode) { + mtspr(SPRN_MMCRC, 0); + mtspr(SPRN_MMCRH, 0); + } + + mtspr(SPRN_MMCRA, 0); + mtspr(SPRN_MMCR0, 0); + mtspr(SPRN_MMCR1, 0); + mtspr(SPRN_MMCR2, 0); + mtspr(SPRN_MMCRS, 0); +} + +static int __init feat_enable_mce_power8(struct dt_cpu_feature *f) +{ + cur_cpu_spec->platform = "power8"; + cur_cpu_spec->flush_tlb = __flush_tlb_power8; + cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8; + + return 1; +} + +static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f) +{ + hfscr_pmu_enable(); + + init_pmu_power8(); + init_pmu_registers = init_pmu_power8; + + cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA; + cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT; + if (pvr_version_is(PVR_POWER8E)) + cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG; + + cur_cpu_spec->num_pmcs = 6; + cur_cpu_spec->pmc_type = PPC_PMC_IBM; + cur_cpu_spec->oprofile_cpu_type = "ppc64/power8"; + + return 1; 
+} + +static void init_pmu_power9(void) +{ + if (hv_mode) + mtspr(SPRN_MMCRC, 0); + + mtspr(SPRN_MMCRA, 0); + mtspr(SPRN_MMCR0, 0); + mtspr(SPRN_MMCR1, 0); + mtspr(SPRN_MMCR2, 0); +} + +static int __init feat_enable_mce_power9(struct dt_cpu_feature *f) +{ + cur_cpu_spec->platform = "power9"; + cur_cpu_spec->flush_tlb = __flush_tlb_power9; + cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9; + + return 1; +} + +static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f) +{ + hfscr_pmu_enable(); + + init_pmu_power9(); + init_pmu_registers = init_pmu_power9; + + cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA; + cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT; + + cur_cpu_spec->num_pmcs = 6; + cur_cpu_spec->pmc_type = PPC_PMC_IBM; + cur_cpu_spec->oprofile_cpu_type = "ppc64/power9"; + + return 1; +} + +static int __init feat_enable_tm(struct dt_cpu_feature *f) +{ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + feat_enable(f); + cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC; + return 1; +#endif + return 0; +} + +static int __init feat_enable_fp(struct dt_cpu_feature *f) +{ + feat_enable(f); + cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE; + + return 1; +} + +static int __init feat_enable_vector(struct dt_cpu_feature *f) +{ +#ifdef CONFIG_ALTIVEC + feat_enable(f); + cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; + cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY; + cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; + + return 1; +#endif + return 0; +} + +static int __init feat_enable_vsx(struct dt_cpu_feature *f) +{ +#ifdef CONFIG_VSX + feat_enable(f); + cur_cpu_spec->cpu_features |= CPU_FTR_VSX; + cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX; + + return 1; +#endif + return 0; +} + +static int __init feat_enable_purr(struct dt_cpu_feature *f) +{ + cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR; + + return 1; +} + +static int __init feat_enable_ebb(struct dt_cpu_feature *f) +{ + /* + * PPC_FEATURE2_EBB is enabled in PMU init code because it has + * historically been related to the PMU facility. This may have + * to be decoupled if EBB becomes more generic. For now, follow + * existing convention. + */ + f->hwcap_bit_nr = -1; + feat_enable(f); + + return 1; +} + +static int __init feat_enable_dbell(struct dt_cpu_feature *f) +{ + u64 lpcr; + + /* P9 has an HFSCR for privileged state */ + feat_enable(f); + + cur_cpu_spec->cpu_features |= CPU_FTR_DBELL; + + lpcr = mfspr(SPRN_LPCR); + lpcr |= LPCR_PECEDH; /* hyp doorbell wakeup */ + mtspr(SPRN_LPCR, lpcr); + + return 1; +} + +static int __init feat_enable_hvi(struct dt_cpu_feature *f) +{ + u64 lpcr; + + /* + * POWER9 XIVE interrupts including in OPAL XICS compatibility + * are always delivered as hypervisor virtualization interrupts (HVI) + * rather than EE. + * + * However LPES0 is not set here, in the chance that an EE does get + * delivered to the host somehow, the EE handler would not expect it + * to be delivered in LPES0 mode (e.g., using SRR[01]). This could + * happen if there is a bug in interrupt controller code, or IC is + * misconfigured in systemsim. 
+ */ + + lpcr = mfspr(SPRN_LPCR); + lpcr |= LPCR_HVICE; /* enable hvi interrupts */ + lpcr |= LPCR_HEIC; /* disable ee interrupts when MSR_HV */ + lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */ + mtspr(SPRN_LPCR, lpcr); + + return 1; +} + +static int __init feat_enable_large_ci(struct dt_cpu_feature *f) +{ + cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE; + + return 1; +} + +struct dt_cpu_feature_match { + const char *name; + int (*enable)(struct dt_cpu_feature *f); + u64 cpu_ftr_bit_mask; +}; + +static struct dt_cpu_feature_match __initdata + dt_cpu_feature_match_table[] = { + {"hypervisor", feat_enable_hv, 0}, + {"big-endian", feat_enable, 0}, + {"little-endian", feat_enable_le, CPU_FTR_REAL_LE}, + {"smt", feat_enable_smt, 0}, + {"interrupt-facilities", feat_enable, 0}, + {"timer-facilities", feat_enable, 0}, + {"timer-facilities-v3", feat_enable, 0}, + {"debug-facilities", feat_enable, 0}, + {"come-from-address-register", feat_enable, CPU_FTR_CFAR}, + {"branch-tracing", feat_enable, 0}, + {"floating-point", feat_enable_fp, 0}, + {"vector", feat_enable_vector, 0}, + {"vector-scalar", feat_enable_vsx, 0}, + {"vector-scalar-v3", feat_enable, 0}, + {"decimal-floating-point", feat_enable, 0}, + {"decimal-integer", feat_enable, 0}, + {"quadword-load-store", feat_enable, 0}, + {"vector-crypto", feat_enable, 0}, + {"mmu-hash", feat_enable_mmu_hash, 0}, + {"mmu-radix", feat_enable_mmu_radix, 0}, + {"mmu-hash-v3", feat_enable_mmu_hash_v3, 0}, + {"virtual-page-class-key-protection", feat_enable, 0}, + {"transactional-memory", feat_enable_tm, CPU_FTR_TM}, + {"transactional-memory-v3", feat_enable_tm, 0}, + {"idle-nap", feat_enable_idle_nap, 0}, + {"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0}, + {"idle-stop", feat_enable_idle_stop, 0}, + {"machine-check-power8", feat_enable_mce_power8, 0}, + {"performance-monitor-power8", feat_enable_pmu_power8, 0}, + {"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR}, + {"event-based-branch", feat_enable_ebb, 0}, + {"target-address-register", feat_enable, 0}, + {"branch-history-rolling-buffer", feat_enable, 0}, + {"control-register", feat_enable, CPU_FTR_CTRL}, + {"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL}, + {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL}, + {"processor-utilization-of-resources-register", feat_enable_purr, 0}, + {"subcore", feat_enable, CPU_FTR_SUBCORE}, + {"no-execute", feat_enable, 0}, + {"strong-access-ordering", feat_enable, CPU_FTR_SAO}, + {"cache-inhibited-large-page", feat_enable_large_ci, 0}, + {"coprocessor-icswx", feat_enable, CPU_FTR_ICSWX}, + {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0}, + {"program-priority-register", feat_enable, CPU_FTR_HAS_PPR}, + {"wait", feat_enable, 0}, + {"atomic-memory-operations", feat_enable, 0}, + {"branch-v3", feat_enable, 0}, + {"copy-paste", feat_enable, 0}, + {"decimal-floating-point-v3", feat_enable, 0}, + {"decimal-integer-v3", feat_enable, 0}, + {"fixed-point-v3", feat_enable, 0}, + {"floating-point-v3", feat_enable, 0}, + {"group-start-register", feat_enable, 0}, + {"pc-relative-addressing", feat_enable, 0}, + {"machine-check-power9", feat_enable_mce_power9, 0}, + {"performance-monitor-power9", feat_enable_pmu_power9, 0}, + {"event-based-branch-v3", feat_enable, 0}, + {"random-number-generator", feat_enable, 0}, + {"system-call-vectored", feat_disable, 0}, + {"trace-interrupt-v3", feat_enable, 0}, + {"vector-v3", feat_enable, 0}, + {"vector-binary128", feat_enable, 0}, + {"vector-binary16", feat_enable, 0}, 
+ {"wait-v3", feat_enable, 0}, +}; + +/* XXX: how to configure this? Default + boot time? */ +#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN +#define CPU_FEATURE_ENABLE_UNKNOWN 1 +#else +#define CPU_FEATURE_ENABLE_UNKNOWN 0 +#endif + +static void __init cpufeatures_setup_start(u32 isa) +{ + pr_info("setup for ISA %d\n", isa); + + if (isa >= 3000) { + cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300; + cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00; + } +} + +static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f) +{ + const struct dt_cpu_feature_match *m; + bool known = false; + int i; + + for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) { + m = &dt_cpu_feature_match_table[i]; + if (!strcmp(f->name, m->name)) { + known = true; + if (m->enable(f)) + break; + + pr_info("not enabling: %s (disabled or unsupported by kernel)\n", + f->name); + return false; + } + } + + if (!known && CPU_FEATURE_ENABLE_UNKNOWN) { + if (!feat_try_enable_unknown(f)) { + pr_info("not enabling: %s (unknown and unsupported by kernel)\n", + f->name); + return false; + } + } + + if (m->cpu_ftr_bit_mask) + cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask; + + if (known) + pr_debug("enabling: %s\n", f->name); + else + pr_debug("enabling: %s (unknown)\n", f->name); + + return true; +} + +static __init void cpufeatures_cpu_quirks(void) +{ + int version = mfspr(SPRN_PVR); + + /* + * Not all quirks can be derived from the cpufeatures device tree. + */ + if ((version & 0xffffff00) == 0x004e0100) + cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1; +} + +static void __init cpufeatures_setup_finished(void) +{ + cpufeatures_cpu_quirks(); + + if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) { + pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. 
Enabling.\n"); + cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE; + } + + system_registers.lpcr = mfspr(SPRN_LPCR); + system_registers.hfscr = mfspr(SPRN_HFSCR); + system_registers.fscr = mfspr(SPRN_FSCR); + + cpufeatures_flush_tlb(); + + pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n", + cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features); +} + +static int __init fdt_find_cpu_features(unsigned long node, const char *uname, + int depth, void *data) +{ + if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features") + && of_get_flat_dt_prop(node, "isa", NULL)) + return 1; + + return 0; +} + +static bool __initdata using_dt_cpu_ftrs = false; + +bool __init dt_cpu_ftrs_in_use(void) +{ + return using_dt_cpu_ftrs; +} + +bool __init dt_cpu_ftrs_init(void *fdt) +{ + /* Setup and verify the FDT, if it fails we just bail */ + if (!early_init_dt_verify(fdt)) + return false; + + if (!of_scan_flat_dt(fdt_find_cpu_features, NULL)) + return false; + + cpufeatures_setup_cpu(); + + using_dt_cpu_ftrs = true; + return true; +} + +static int nr_dt_cpu_features; +static struct dt_cpu_feature *dt_cpu_features; + +static int __init process_cpufeatures_node(unsigned long node, + const char *uname, int i) +{ + const __be32 *prop; + struct dt_cpu_feature *f; + int len; + + f = &dt_cpu_features[i]; + memset(f, 0, sizeof(struct dt_cpu_feature)); + + f->node = node; + + f->name = uname; + + prop = of_get_flat_dt_prop(node, "isa", &len); + if (!prop) { + pr_warn("%s: missing isa property\n", uname); + return 0; + } + f->isa = be32_to_cpup(prop); + + prop = of_get_flat_dt_prop(node, "usable-privilege", &len); + if (!prop) { + pr_warn("%s: missing usable-privilege property", uname); + return 0; + } + f->usable_privilege = be32_to_cpup(prop); + + prop = of_get_flat_dt_prop(node, "hv-support", &len); + if (prop) + f->hv_support = be32_to_cpup(prop); + else + f->hv_support = HV_SUPPORT_NONE; + + prop = of_get_flat_dt_prop(node, "os-support", &len); + if (prop) + f->os_support = be32_to_cpup(prop); + else + f->os_support = OS_SUPPORT_NONE; + + prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len); + if (prop) + f->hfscr_bit_nr = be32_to_cpup(prop); + else + f->hfscr_bit_nr = -1; + prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len); + if (prop) + f->fscr_bit_nr = be32_to_cpup(prop); + else + f->fscr_bit_nr = -1; + prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len); + if (prop) + f->hwcap_bit_nr = be32_to_cpup(prop); + else + f->hwcap_bit_nr = -1; + + if (f->usable_privilege & USABLE_HV) { + if (!(mfmsr() & MSR_HV)) { + pr_warn("%s: HV feature passed to guest\n", uname); + return 0; + } + + if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) { + pr_warn("%s: unwanted hfscr_bit_nr\n", uname); + return 0; + } + + if (f->hv_support == HV_SUPPORT_HFSCR) { + if (f->hfscr_bit_nr == -1) { + pr_warn("%s: missing hfscr_bit_nr\n", uname); + return 0; + } + } + } else { + if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) { + pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname); + return 0; + } + } + + if (f->usable_privilege & USABLE_OS) { + if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) { + pr_warn("%s: unwanted fscr_bit_nr\n", uname); + return 0; + } + + if (f->os_support == OS_SUPPORT_FSCR) { + if (f->fscr_bit_nr == -1) { + pr_warn("%s: missing fscr_bit_nr\n", uname); + return 0; + } + } + } else { + if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) { + pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname); + return 0; + } + } + + if (!(f->usable_privilege & 
USABLE_PR)) { + if (f->hwcap_bit_nr != -1) { + pr_warn("%s: unwanted hwcap_bit_nr\n", uname); + return 0; + } + } + + /* Do all the independent features in the first pass */ + if (!of_get_flat_dt_prop(node, "dependencies", &len)) { + if (cpufeatures_process_feature(f)) + f->enabled = 1; + else + f->disabled = 1; + } + + return 0; +} + +static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f) +{ + const __be32 *prop; + int len; + int nr_deps; + int i; + + if (f->enabled || f->disabled) + return; + + prop = of_get_flat_dt_prop(f->node, "dependencies", &len); + if (!prop) { + pr_warn("%s: missing dependencies property", f->name); + return; + } + + nr_deps = len / sizeof(int); + + for (i = 0; i < nr_deps; i++) { + unsigned long phandle = be32_to_cpu(prop[i]); + int j; + + for (j = 0; j < nr_dt_cpu_features; j++) { + struct dt_cpu_feature *d = &dt_cpu_features[j]; + + if (of_get_flat_dt_phandle(d->node) == phandle) { + cpufeatures_deps_enable(d); + if (d->disabled) { + f->disabled = 1; + return; + } + } + } + } + + if (cpufeatures_process_feature(f)) + f->enabled = 1; + else + f->disabled = 1; +} + +static int __init scan_cpufeatures_subnodes(unsigned long node, + const char *uname, + void *data) +{ + int *count = data; + + process_cpufeatures_node(node, uname, *count); + + (*count)++; + + return 0; +} + +static int __init count_cpufeatures_subnodes(unsigned long node, + const char *uname, + void *data) +{ + int *count = data; + + (*count)++; + + return 0; +} + +static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char + *uname, int depth, void *data) +{ + const __be32 *prop; + int count, i; + u32 isa; + + /* We are scanning "ibm,powerpc-cpu-features" nodes only */ + if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")) + return 0; + + prop = of_get_flat_dt_prop(node, "isa", NULL); + if (!prop) + /* We checked before, "can't happen" */ + return 0; + + isa = be32_to_cpup(prop); + + /* Count and allocate space for cpu features */ + of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes, + &nr_dt_cpu_features); + dt_cpu_features = __va( + memblock_alloc(sizeof(struct dt_cpu_feature)* + nr_dt_cpu_features, PAGE_SIZE)); + + cpufeatures_setup_start(isa); + + /* Scan nodes into dt_cpu_features and enable those without deps */ + count = 0; + of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count); + + /* Recursive enable remaining features with dependencies */ + for (i = 0; i < nr_dt_cpu_features; i++) { + struct dt_cpu_feature *f = &dt_cpu_features[i]; + + cpufeatures_deps_enable(f); + } + + prop = of_get_flat_dt_prop(node, "display-name", NULL); + if (prop && strlen((char *)prop) != 0) { + strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name)); + cur_cpu_spec->cpu_name = dt_cpu_name; + } + + cpufeatures_setup_finished(); + + memblock_free(__pa(dt_cpu_features), + sizeof(struct dt_cpu_feature)*nr_dt_cpu_features); + + return 0; +} + +void __init dt_cpu_ftrs_scan(void) +{ + of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL); +} diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 45b453e4d0c8..acd8ca76233e 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -735,8 +735,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) andis. 
r15,r14,(DBSR_IC|DBSR_BT)@h beq+ 1f +#ifdef CONFIG_RELOCATABLE + ld r15,PACATOC(r13) + ld r14,interrupt_base_book3e@got(r15) + ld r15,__end_interrupts@got(r15) +#else LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) LOAD_REG_IMMEDIATE(r15,__end_interrupts) +#endif cmpld cr0,r10,r14 cmpld cr1,r10,r15 blt+ cr0,1f @@ -799,8 +805,14 @@ kernel_dbg_exc: andis. r15,r14,(DBSR_IC|DBSR_BT)@h beq+ 1f +#ifdef CONFIG_RELOCATABLE + ld r15,PACATOC(r13) + ld r14,interrupt_base_book3e@got(r15) + ld r15,__end_interrupts@got(r15) +#else LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) LOAD_REG_IMMEDIATE(r15,__end_interrupts) +#endif cmpld cr0,r10,r14 cmpld cr1,r10,r15 blt+ cr0,1f diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index a9312b52fe6f..ae418b85c17c 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -391,9 +391,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) */ BEGIN_FTR_SECTION rlwinm. r11,r12,47-31,30,31 - beq- 4f - BRANCH_TO_COMMON(r10, machine_check_idle_common) -4: + bne machine_check_idle_common END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) #endif diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index d2f0afeae5a0..40c4887c27b6 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -57,6 +57,7 @@ #include <asm/fadump.h> #include <asm/epapr_hcalls.h> #include <asm/firmware.h> +#include <asm/dt_cpu_ftrs.h> #include <mm/mmu_decl.h> @@ -375,23 +376,31 @@ static int __init early_init_dt_scan_cpus(unsigned long node, * A POWER6 partition in "POWER6 architected" mode * uses the 0x0f000002 PVR value; in POWER5+ mode * it uses 0x0f000001. + * + * If we're using device tree CPU feature discovery then we don't + * support the cpu-version property, and it's the responsibility of the + * firmware/hypervisor to provide the correct feature set for the + * architecture level via the ibm,powerpc-cpu-features binding. */ - prop = of_get_flat_dt_prop(node, "cpu-version", NULL); - if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) - identify_cpu(0, be32_to_cpup(prop)); + if (!dt_cpu_ftrs_in_use()) { + prop = of_get_flat_dt_prop(node, "cpu-version", NULL); + if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) + identify_cpu(0, be32_to_cpup(prop)); - identical_pvr_fixup(node); + check_cpu_feature_properties(node); + check_cpu_pa_features(node); + } - check_cpu_feature_properties(node); - check_cpu_pa_features(node); + identical_pvr_fixup(node); init_mmu_slb_size(node); #ifdef CONFIG_PPC64 - if (nthreads > 1) - cur_cpu_spec->cpu_features |= CPU_FTR_SMT; - else + if (nthreads == 1) cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; + else if (!dt_cpu_ftrs_in_use()) + cur_cpu_spec->cpu_features |= CPU_FTR_SMT; #endif + return 0; } @@ -721,6 +730,8 @@ void __init early_init_devtree(void *params) DBG("Scanning CPUs ...\n"); + dt_cpu_ftrs_scan(); + /* Retrieve CPU related informations from the flat tree * (altivec support, boot CPU ID, ...) 
*/ diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 69e077180db6..71dcda91755d 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -261,7 +261,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "processor\t: %lu\n", cpu_id); seq_printf(m, "cpu\t\t: "); - if (cur_cpu_spec->pvr_mask) + if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name) seq_printf(m, "%s", cur_cpu_spec->cpu_name); else seq_printf(m, "unknown (%08x)", pvr); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 0d4dcaeaafcb..f35ff9dea4fb 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -49,6 +49,7 @@ #include <asm/paca.h> #include <asm/time.h> #include <asm/cputable.h> +#include <asm/dt_cpu_ftrs.h> #include <asm/sections.h> #include <asm/btext.h> #include <asm/nvram.h> @@ -274,8 +275,10 @@ void __init early_setup(unsigned long dt_ptr) /* -------- printk is _NOT_ safe to use here ! ------- */ - /* Identify CPU type */ - identify_cpu(0, mfspr(SPRN_PVR)); + /* Try new device tree based feature discovery ... */ + if (!dt_cpu_ftrs_init(__va(dt_ptr))) + /* Otherwise use the old style CPU table */ + identify_cpu(0, mfspr(SPRN_PVR)); /* Assume we're on cpu 0 for now. Don't write to the paca yet! */ initialise_paca(&boot_paca, 0); @@ -541,6 +544,9 @@ void __init initialize_cache_info(void) dcache_bsize = ppc64_caches.l1d.block_size; icache_bsize = ppc64_caches.l1i.block_size; + cur_cpu_spec->dcache_bsize = dcache_bsize; + cur_cpu_spec->icache_bsize = icache_bsize; + DBG(" <- initialize_cache_info()\n"); } diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index d2f19821d71d..d12ea7b9fd47 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -412,11 +412,14 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data) * been set for the PE, we will set EEH_PE_CFG_BLOCKED for * that PE to block its config space. * + * Broadcom BCM5718 2-ports NICs (14e4:1656) * Broadcom Austin 4-ports NICs (14e4:1657) * Broadcom Shiner 4-ports 1G NICs (14e4:168a) * Broadcom Shiner 2-ports 10G NICs (14e4:168e) */ if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM && + pdn->device_id == 0x1656) || + (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM && pdn->device_id == 0x1657) || (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM && pdn->device_id == 0x168a) || diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c index 986cd111d4df..c651e668996b 100644 --- a/arch/powerpc/sysdev/cpm1.c +++ b/arch/powerpc/sysdev/cpm1.c @@ -377,6 +377,10 @@ static void cpm1_set_pin16(int port, int pin, int flags) setbits16(&iop->odr_sor, pin); else clrbits16(&iop->odr_sor, pin); + if (flags & CPM_PIN_FALLEDGE) + setbits16(&iop->intr, pin); + else + clrbits16(&iop->intr, pin); } } @@ -528,6 +532,9 @@ struct cpm1_gpio16_chip { /* shadowed data register to clear/set bits safely */ u16 cpdata; + + /* IRQ associated with Pins when relevant */ + int irq[16]; }; static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc) @@ -578,6 +585,14 @@ static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value) spin_unlock_irqrestore(&cpm1_gc->lock, flags); } +static int cpm1_gpio16_to_irq(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); + + return cpm1_gc->irq[gpio] ? 
: -ENXIO; +} + static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); @@ -618,6 +633,7 @@ int cpm1_gpiochip_add16(struct device_node *np) struct cpm1_gpio16_chip *cpm1_gc; struct of_mm_gpio_chip *mm_gc; struct gpio_chip *gc; + u16 mask; cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL); if (!cpm1_gc) @@ -625,6 +641,14 @@ int cpm1_gpiochip_add16(struct device_node *np) spin_lock_init(&cpm1_gc->lock); + if (!of_property_read_u16(np, "fsl,cpm1-gpio-irq-mask", &mask)) { + int i, j; + + for (i = 0, j = 0; i < 16; i++) + if (mask & (1 << (15 - i))) + cpm1_gc->irq[i] = irq_of_parse_and_map(np, j++); + } + mm_gc = &cpm1_gc->mm_gc; gc = &mm_gc->gc; @@ -634,6 +658,7 @@ int cpm1_gpiochip_add16(struct device_node *np) gc->direction_output = cpm1_gpio16_dir_out; gc->get = cpm1_gpio16_get; gc->set = cpm1_gpio16_set; + gc->to_irq = cpm1_gpio16_to_irq; return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc); } |
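As a footnote to the cpm1.c hunk above, a small sketch of the "fsl,cpm1-gpio-irq-mask" mapping it introduces: bit 15 of the 16-bit mask corresponds to GPIO 0, and each set bit consumes the next interrupt specifier in the controller's node, which is why cpm1_gpio16_to_irq() can report an IRQ only for pins that actually have one. This is a user-space illustration with made-up values; the kernel calls irq_of_parse_and_map() where this merely records an index.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t mask = 0x5000;      /* example: GPIOs 1 and 3 have interrupts wired up */
	int irq_index[16] = { 0 };   /* 0 means "no IRQ", matching the kernel's unset slots */
	int i, j;

	/* Same walk as cpm1_gpiochip_add16(): MSB-first, one specifier per set bit. */
	for (i = 0, j = 0; i < 16; i++) {
		if (mask & (1 << (15 - i)))
			irq_index[i] = ++j;  /* store 1-based so 0 can stay "none" */
	}

	for (i = 0; i < 16; i++)
		printf("gpio %2d -> %s (interrupts index %d)\n", i,
		       irq_index[i] ? "has IRQ" : "no IRQ",
		       irq_index[i] ? irq_index[i] - 1 : -1);
	return 0;
}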