From c9a1ff316bc9b1d1806a4366d0aef6e18833ba52 Mon Sep 17 00:00:00 2001
From: Brian Gerst
Date: Wed, 17 Jun 2020 18:56:24 -0400
Subject: x86/stackprotector: Pre-initialize canary for secondary CPUs

The idle tasks created for each secondary CPU already have a random stack
canary generated by fork(). Copy the canary to the percpu variable before
starting the secondary CPU, which removes the need to call
boot_init_stack_canary().

Signed-off-by: Brian Gerst
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20200617225624.799335-1-brgerst@gmail.com
---
 arch/x86/include/asm/stackprotector.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'arch/x86/include/asm')

diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 9804a7957f4e..7fb482f0f25b 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -90,6 +90,15 @@ static __always_inline void boot_init_stack_canary(void)
 #endif
 }
 
+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{
+#ifdef CONFIG_X86_64
+	per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
+#else
+	per_cpu(stack_canary.canary, cpu) = idle->stack_canary;
+#endif
+}
+
 static inline void setup_stack_canary_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -119,6 +128,9 @@ static inline void load_stack_canary_segment(void)
 static inline void setup_stack_canary_segment(int cpu)
 { }
 
+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{ }
+
 static inline void load_stack_canary_segment(void)
 {
 #ifdef CONFIG_X86_32
--
cgit v1.2.3
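The call site for the new helper lives outside this filtered view (cgit only
shows arch/x86/include/asm here), so the following is only a hedged sketch of
the bring-up side of the change; the surrounding function name is a stand-in,
not the real arch/x86/kernel/smpboot.c code.

#include <linux/sched.h>
#include <asm/stackprotector.h>

/* Illustrative only: assumed to run on the boot CPU before the AP is started. */
static void prepare_secondary_cpu(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * fork() already gave the idle task a random canary, so seeding
	 * the per-CPU slot here lets the secondary CPU run
	 * stack-protected code immediately, without ever calling
	 * boot_init_stack_canary() on itself.
	 */
	cpu_init_stack_canary(cpu, idle);
}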
From e00b62f0b06d0ae2b844049f216807617aff0cdb Mon Sep 17 00:00:00 2001
From: Tony Luck
Date: Mon, 20 Jul 2020 21:37:49 -0700
Subject: x86/cpu: Add Lakefield, Alder Lake and Rocket Lake models to the Intel CPU family

Add three new Intel CPU models.

Signed-off-by: Tony Luck
Signed-off-by: Ingo Molnar
Link: https://lore.kernel.org/r/20200721043749.31567-1-tony.luck@intel.com
---
 arch/x86/include/asm/intel-family.h | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'arch/x86/include/asm')

diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index a338a6deb950..5e658ba2654a 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -89,8 +89,15 @@
 #define INTEL_FAM6_COMETLAKE		0xA5
 #define INTEL_FAM6_COMETLAKE_L		0xA6
 
+#define INTEL_FAM6_ROCKETLAKE		0xA7
+
 #define INTEL_FAM6_SAPPHIRERAPIDS_X	0x8F
 
+/* Hybrid Core/Atom Processors */
+
+#define INTEL_FAM6_LAKEFIELD		0x8A
+#define INTEL_FAM6_ALDERLAKE		0x97
+
 /* "Small Core" Processors (Atom) */
 
 #define INTEL_FAM6_ATOM_BONNELL		0x1C /* Diamondville, Pineview */
--
cgit v1.2.3

From 85b23fbc7d88f8c6e3951721802d7845bc39663d Mon Sep 17 00:00:00 2001
From: Ricardo Neri
Date: Sun, 26 Jul 2020 21:31:29 -0700
Subject: x86/cpufeatures: Add enumeration for SERIALIZE instruction

The Intel architecture defines a set of Serializing Instructions (a
detailed definition can be found in Vol. 3, Section 8.3 of the Intel
"main" manual, the SDM). However, these instructions do more than what is
required: they have side effects and/or may be rather invasive.
Furthermore, some of these instructions are only available in kernel mode
or may cause VMExits. Thus, software using these instructions only to
serialize execution (as defined in the manual) must handle the undesired
side effects.

As indicated in the name, SERIALIZE is a new Intel architecture
Serializing Instruction. Crucially, it does not have any of the mentioned
side effects. Also, it does not cause a VMExit and can be used in user
mode.

This new instruction is currently documented in the latest "extensions"
manual (ISE). It will appear in the "main" manual in the future.

Signed-off-by: Ricardo Neri
Signed-off-by: Ingo Molnar
Reviewed-by: Tony Luck
Acked-by: Dave Hansen
Link: https://lore.kernel.org/r/20200727043132.15082-2-ricardo.neri-calderon@linux.intel.com
---
 arch/x86/include/asm/cpufeatures.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/x86/include/asm')

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 02dabc9e77b0..adf45cf3651b 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -365,6 +365,7 @@
 #define X86_FEATURE_SRBDS_CTRL		(18*32+ 9) /* "" SRBDS mitigation MSR available */
 #define X86_FEATURE_MD_CLEAR		(18*32+10) /* VERW clears CPU buffers */
 #define X86_FEATURE_TSX_FORCE_ABORT	(18*32+13) /* "" TSX_FORCE_ABORT */
+#define X86_FEATURE_SERIALIZE		(18*32+14) /* SERIALIZE instruction */
 #define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
--
cgit v1.2.3
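For orientation, a hedged sketch (not part of the patch) of how a bit in
word 18, i.e. CPUID.(EAX=7,ECX=0):EDX, is typically consumed once it has
been enumerated; the helper name is invented purely for illustration.

#include <linux/types.h>
#include <asm/cpufeature.h>
#include <asm/cpufeatures.h>

/* Returns true when the CPU advertises SERIALIZE (CPUID.7.0:EDX[14]). */
static bool cpu_supports_serialize(void)
{
	/*
	 * Because the flag's comment string has no "" prefix, it is also
	 * reported as "serialize" in /proc/cpuinfo.
	 */
	return boot_cpu_has(X86_FEATURE_SERIALIZE);
}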
From 9998a9832c4027e907353e5e05fde730cf624b77 Mon Sep 17 00:00:00 2001
From: Ricardo Neri
Date: Sun, 26 Jul 2020 21:31:30 -0700
Subject: x86/cpu: Relocate sync_core() to sync_core.h

Having sync_core() in processor.h is problematic since it is not possible
to check for hardware capabilities via the *cpu_has() family of macros
there: those macros themselves need the definitions in processor.h.

It also looks more intuitive to relocate the function to sync_core.h.

This changeset does not change any functionality.

Signed-off-by: Ricardo Neri
Signed-off-by: Ingo Molnar
Reviewed-by: Tony Luck
Link: https://lore.kernel.org/r/20200727043132.15082-3-ricardo.neri-calderon@linux.intel.com
---
 arch/x86/include/asm/processor.h | 64 ----------------------------------------
 arch/x86/include/asm/sync_core.h | 64 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+), 64 deletions(-)

(limited to 'arch/x86/include/asm')

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 03b7c4ca425a..68ba42fdd184 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -678,70 +678,6 @@ static inline unsigned int cpuid_edx(unsigned int op)
 	return edx;
 }
 
-/*
- * This function forces the icache and prefetched instruction stream to
- * catch up with reality in two very specific cases:
- *
- *  a) Text was modified using one virtual address and is about to be executed
- *     from the same physical page at a different virtual address.
- *
- *  b) Text was modified on a different CPU, may subsequently be
- *     executed on this CPU, and you want to make sure the new version
- *     gets executed. This generally means you're calling this in a IPI.
- *
- * If you're calling this for a different reason, you're probably doing
- * it wrong.
- */
-static inline void sync_core(void)
-{
-	/*
-	 * There are quite a few ways to do this. IRET-to-self is nice
-	 * because it works on every CPU, at any CPL (so it's compatible
-	 * with paravirtualization), and it never exits to a hypervisor.
-	 * The only down sides are that it's a bit slow (it seems to be
-	 * a bit more than 2x slower than the fastest options) and that
-	 * it unmasks NMIs. The "push %cs" is needed because, in
-	 * paravirtual environments, __KERNEL_CS may not be a valid CS
-	 * value when we do IRET directly.
-	 *
-	 * In case NMI unmasking or performance ever becomes a problem,
-	 * the next best option appears to be MOV-to-CR2 and an
-	 * unconditional jump. That sequence also works on all CPUs,
-	 * but it will fault at CPL3 (i.e. Xen PV).
-	 *
-	 * CPUID is the conventional way, but it's nasty: it doesn't
-	 * exist on some 486-like CPUs, and it usually exits to a
-	 * hypervisor.
-	 *
-	 * Like all of Linux's memory ordering operations, this is a
-	 * compiler barrier as well.
-	 */
-#ifdef CONFIG_X86_32
-	asm volatile (
-		"pushfl\n\t"
-		"pushl %%cs\n\t"
-		"pushl $1f\n\t"
-		"iret\n\t"
-		"1:"
-		: ASM_CALL_CONSTRAINT : : "memory");
-#else
-	unsigned int tmp;
-
-	asm volatile (
-		"mov %%ss, %0\n\t"
-		"pushq %q0\n\t"
-		"pushq %%rsp\n\t"
-		"addq $8, (%%rsp)\n\t"
-		"pushfq\n\t"
-		"mov %%cs, %0\n\t"
-		"pushq %q0\n\t"
-		"pushq $1f\n\t"
-		"iretq\n\t"
-		"1:"
-		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
-#endif
-}
-
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern void amd_e400_c1e_apic_setup(void);
 
diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
index c67caafd3381..9c5573f2c333 100644
--- a/arch/x86/include/asm/sync_core.h
+++ b/arch/x86/include/asm/sync_core.h
@@ -6,6 +6,70 @@
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
 
+/*
+ * This function forces the icache and prefetched instruction stream to
+ * catch up with reality in two very specific cases:
+ *
+ *  a) Text was modified using one virtual address and is about to be executed
+ *     from the same physical page at a different virtual address.
+ *
+ *  b) Text was modified on a different CPU, may subsequently be
+ *     executed on this CPU, and you want to make sure the new version
+ *     gets executed. This generally means you're calling this in a IPI.
+ *
+ * If you're calling this for a different reason, you're probably doing
+ * it wrong.
+ */
+static inline void sync_core(void)
+{
+	/*
+	 * There are quite a few ways to do this. IRET-to-self is nice
+	 * because it works on every CPU, at any CPL (so it's compatible
+	 * with paravirtualization), and it never exits to a hypervisor.
+	 * The only down sides are that it's a bit slow (it seems to be
+	 * a bit more than 2x slower than the fastest options) and that
+	 * it unmasks NMIs. The "push %cs" is needed because, in
+	 * paravirtual environments, __KERNEL_CS may not be a valid CS
+	 * value when we do IRET directly.
+	 *
+	 * In case NMI unmasking or performance ever becomes a problem,
+	 * the next best option appears to be MOV-to-CR2 and an
+	 * unconditional jump. That sequence also works on all CPUs,
+	 * but it will fault at CPL3 (i.e. Xen PV).
+	 *
+	 * CPUID is the conventional way, but it's nasty: it doesn't
+	 * exist on some 486-like CPUs, and it usually exits to a
+	 * hypervisor.
+	 *
+	 * Like all of Linux's memory ordering operations, this is a
+	 * compiler barrier as well.
+	 */
+#ifdef CONFIG_X86_32
+	asm volatile (
+		"pushfl\n\t"
+		"pushl %%cs\n\t"
+		"pushl $1f\n\t"
+		"iret\n\t"
+		"1:"
+		: ASM_CALL_CONSTRAINT : : "memory");
+#else
+	unsigned int tmp;
+
+	asm volatile (
+		"mov %%ss, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq %%rsp\n\t"
+		"addq $8, (%%rsp)\n\t"
+		"pushfq\n\t"
+		"mov %%cs, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq $1f\n\t"
+		"iretq\n\t"
+		"1:"
+		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
+#endif
+}
+
 /*
  * Ensure that a core serializing instruction is issued before returning
  * to user-mode. x86 implements return to user-space through sysexit,
--
cgit v1.2.3
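To make case (b) from the relocated comment concrete, here is a hedged
sketch of the usual pattern: one CPU patches kernel text, then every CPU
runs sync_core() from an IPI before it is allowed to execute the new
bytes. Apart from sync_core() and on_each_cpu(), the names are invented
for illustration and are not taken from this commit.

#include <linux/smp.h>
#include <asm/sync_core.h>

static void do_sync_core(void *unused)
{
	/* Runs on each CPU in IPI context; discards stale prefetched text. */
	sync_core();
}

static void publish_patched_text(void)
{
	/* ... the text_poke()-style modification happens before this ... */

	/* Wait until every CPU has serialized before relying on the new code. */
	on_each_cpu(do_sync_core, NULL, 1);
}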
From f69ca629d89d65737537e05308ac531f7bb07d5c Mon Sep 17 00:00:00 2001
From: Ricardo Neri
Date: Sun, 26 Jul 2020 21:31:31 -0700
Subject: x86/cpu: Refactor sync_core() for readability

Instead of having #ifdef/#endif blocks inside sync_core() for X86_64 and
X86_32, implement a new function, iret_to_self(), with two versions.

In this manner, avoid having to use even more #ifdef/#endif blocks when
adding support for SERIALIZE in sync_core().

Co-developed-by: Tony Luck
Signed-off-by: Tony Luck
Signed-off-by: Ricardo Neri
Signed-off-by: Ingo Molnar
Link: https://lore.kernel.org/r/20200727043132.15082-4-ricardo.neri-calderon@linux.intel.com
---
 arch/x86/include/asm/special_insns.h |  1 -
 arch/x86/include/asm/sync_core.h     | 56 ++++++++++++++++++++----------------
 2 files changed, 32 insertions(+), 25 deletions(-)

(limited to 'arch/x86/include/asm')

diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index eb8e781c4353..59a3e13204c3 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -234,7 +234,6 @@ static inline void clwb(volatile void *__p)
 
 #define nop() asm volatile ("nop")
 
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_SPECIAL_INSNS_H */
diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
index 9c5573f2c333..fdb5b356e59b 100644
--- a/arch/x86/include/asm/sync_core.h
+++ b/arch/x86/include/asm/sync_core.h
@@ -6,6 +6,37 @@
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
 
+#ifdef CONFIG_X86_32
+static inline void iret_to_self(void)
+{
+	asm volatile (
+		"pushfl\n\t"
+		"pushl %%cs\n\t"
+		"pushl $1f\n\t"
+		"iret\n\t"
+		"1:"
+		: ASM_CALL_CONSTRAINT : : "memory");
+}
+#else
+static inline void iret_to_self(void)
+{
+	unsigned int tmp;
+
+	asm volatile (
+		"mov %%ss, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq %%rsp\n\t"
+		"addq $8, (%%rsp)\n\t"
+		"pushfq\n\t"
+		"mov %%cs, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq $1f\n\t"
+		"iretq\n\t"
+		"1:"
+		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
+}
+#endif /* CONFIG_X86_32 */
+
 /*
  * This function forces the icache and prefetched instruction stream to
  * catch up with reality in two very specific cases:
@@ -44,30 +75,7 @@ static inline void sync_core(void)
 	 * Like all of Linux's memory ordering operations, this is a
 	 * compiler barrier as well.
 	 */
-#ifdef CONFIG_X86_32
-	asm volatile (
-		"pushfl\n\t"
-		"pushl %%cs\n\t"
-		"pushl $1f\n\t"
-		"iret\n\t"
-		"1:"
-		: ASM_CALL_CONSTRAINT : : "memory");
-#else
-	unsigned int tmp;
-
-	asm volatile (
-		"mov %%ss, %0\n\t"
-		"pushq %q0\n\t"
-		"pushq %%rsp\n\t"
-		"addq $8, (%%rsp)\n\t"
-		"pushfq\n\t"
-		"mov %%cs, %0\n\t"
-		"pushq %q0\n\t"
-		"pushq $1f\n\t"
-		"iretq\n\t"
-		"1:"
-		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
-#endif
+	iret_to_self();
 }
 
 /*
--
cgit v1.2.3
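As a pointer to where this refactor is headed (per the commit message), a
hedged sketch of how SERIALIZE support could then slot into sync_core() in
sync_core.h without further #ifdef nesting. This is not the final upstream
code; the raw byte sequence is shown only because older assemblers may not
know the SERIALIZE mnemonic.

static inline void sync_core(void)
{
	/*
	 * SERIALIZE (opcode 0f 01 e8) serializes instruction execution
	 * with no other side effects, works at any CPL and does not
	 * cause a VMExit, so prefer it when the CPU enumerates it.
	 */
	if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
		asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory");
		return;
	}

	/* Otherwise fall back to the IRET-to-self trick factored out above. */
	iret_to_self();
}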