From 3ac6d8c787b835b997eb23e43e09aa0895ef7d58 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Mon, 5 Feb 2018 17:18:11 -0800
Subject: x86/entry/64: Clear registers for exceptions/interrupts, to reduce
 speculation attack surface

Clear the 'extra' registers on entering the 64-bit kernel for exceptions
and interrupts. The common registers are not cleared since they are likely
clobbered well before they can be exploited in a speculative execution
attack.

Originally-From: Andi Kleen
Signed-off-by: Dan Williams
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/151787989146.7847.15749181712358213254.stgit@dwillia2-desk3.amr.corp.intel.com
[ Made small improvements to the changelog and the code comments. ]
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/calling.h | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 3f48f695d5e6..f4b129d4af42 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -147,6 +147,25 @@ For 32-bit we have the following conventions - kernel is built with
 	UNWIND_HINT_REGS offset=\offset
 	.endm
 
+	/*
+	 * Sanitize registers of values that a speculation attack
+	 * might otherwise want to exploit. The lower registers are
+	 * likely clobbered well before they could be put to use in
+	 * a speculative execution gadget:
+	 */
+	.macro CLEAR_REGS_NOSPEC
+	xorl %ebp, %ebp
+	xorl %ebx, %ebx
+	xorq %r8, %r8
+	xorq %r9, %r9
+	xorq %r10, %r10
+	xorq %r11, %r11
+	xorq %r12, %r12
+	xorq %r13, %r13
+	xorq %r14, %r14
+	xorq %r15, %r15
+	.endm
+
 	.macro POP_EXTRA_REGS
 	popq %r15
 	popq %r14
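Two details of the XOR sequence above are worth spelling out. On x86-64, any
write to a 32-bit register zero-extends into the full 64-bit register, so
xorl %ebp, %ebp clears all of %rbp just as xorq would, while %r8-%r15 need a
REX prefix in any encoding; and XOR of a register with itself is a recognized
zeroing idiom that also breaks any dependency on the register's old value. A
minimal standalone sketch, not part of the series (the zero_demo label is made
up for illustration), which assembles as-is with 'as -o zero_demo.o zero_demo.S':

	.text
	.globl	zero_demo
zero_demo:
	xorl	%ebp, %ebp	/* clears all 64 bits of %rbp via zero-extension */
	xorl	%ebx, %ebx	/* likewise for %rbx */
	xorq	%r8, %r8	/* same idiom; %r8-%r15 carry a REX prefix anyway */
	xorq	%r15, %r15
	ret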
From 2e3f0098bc45f710a2f4cbcc94b80a1fae7a99a1 Mon Sep 17 00:00:00 2001
From: Dominik Brodowski
Date: Sun, 11 Feb 2018 11:49:42 +0100
Subject: x86/entry/64: Merge SAVE_C_REGS and SAVE_EXTRA_REGS, remove unused
 extensions

All current code paths call SAVE_C_REGS and then immediately
SAVE_EXTRA_REGS. Therefore, merge these two macros and order the MOV
sequences properly.

While at it, remove the macros to save all except specific registers, as
these macros have been unused for a long time.

Suggested-by: Linus Torvalds
Signed-off-by: Dominik Brodowski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: dan.j.williams@intel.com
Link: http://lkml.kernel.org/r/20180211104949.12992-2-linux@dominikbrodowski.net
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/calling.h | 57 +++++++++++++-----------------------------------
 1 file changed, 15 insertions(+), 42 deletions(-)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index f4b129d4af42..8907a6593b42 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -101,49 +101,22 @@ For 32-bit we have the following conventions - kernel is built with
 	addq $-(15*8), %rsp
 	.endm
 
-	.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
-	.if \r11
-	movq %r11, 6*8+\offset(%rsp)
-	.endif
-	.if \r8910
-	movq %r10, 7*8+\offset(%rsp)
-	movq %r9, 8*8+\offset(%rsp)
-	movq %r8, 9*8+\offset(%rsp)
-	.endif
-	.if \rax
-	movq %rax, 10*8+\offset(%rsp)
-	.endif
-	.if \rcx
-	movq %rcx, 11*8+\offset(%rsp)
-	.endif
-	movq %rdx, 12*8+\offset(%rsp)
-	movq %rsi, 13*8+\offset(%rsp)
+	.macro SAVE_REGS offset=0
 	movq %rdi, 14*8+\offset(%rsp)
-	UNWIND_HINT_REGS offset=\offset extra=0
-	.endm
-	.macro SAVE_C_REGS offset=0
-	SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
-	.endm
-	.macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
-	SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
-	.endm
-	.macro SAVE_C_REGS_EXCEPT_R891011
-	SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
-	.endm
-	.macro SAVE_C_REGS_EXCEPT_RCX_R891011
-	SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
-	.endm
-	.macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
-	SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
-	.endm
-
-	.macro SAVE_EXTRA_REGS offset=0
-	movq %r15, 0*8+\offset(%rsp)
-	movq %r14, 1*8+\offset(%rsp)
-	movq %r13, 2*8+\offset(%rsp)
-	movq %r12, 3*8+\offset(%rsp)
-	movq %rbp, 4*8+\offset(%rsp)
+	movq %rsi, 13*8+\offset(%rsp)
+	movq %rdx, 12*8+\offset(%rsp)
+	movq %rcx, 11*8+\offset(%rsp)
+	movq %rax, 10*8+\offset(%rsp)
+	movq %r8, 9*8+\offset(%rsp)
+	movq %r9, 8*8+\offset(%rsp)
+	movq %r10, 7*8+\offset(%rsp)
+	movq %r11, 6*8+\offset(%rsp)
 	movq %rbx, 5*8+\offset(%rsp)
+	movq %rbp, 4*8+\offset(%rsp)
+	movq %r12, 3*8+\offset(%rsp)
+	movq %r13, 2*8+\offset(%rsp)
+	movq %r14, 1*8+\offset(%rsp)
+	movq %r15, 0*8+\offset(%rsp)
 	UNWIND_HINT_REGS offset=\offset
 	.endm
 
@@ -197,7 +170,7 @@ For 32-bit we have the following conventions - kernel is built with
  * is just setting the LSB, which makes it an invalid stack address and is also
  * a signal to the unwinder that it's a pt_regs pointer in disguise.
  *
- * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
+ * NOTE: This macro must be used *after* SAVE_REGS because it corrupts
  * the original rbp.
  */
 .macro ENCODE_FRAME_POINTER ptregs_offset=0
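The offsets in SAVE_REGS mirror the GP-register block of struct pt_regs:
%r15 lands at 0*8(%rsp) and %rdi at 14*8(%rsp), and the remaining six slots
(orig_ax plus the hardware iret frame) bring the total to SIZEOF_PTREGS =
21*8. At this point in the series an entry path combines the macros roughly
as below - a sketch that assumes calling.h is included; the real call sites
live in arch/x86/entry/entry_64.S:

	/* sketch of an interrupt/exception entry after this patch */
	ALLOC_PT_GPREGS_ON_STACK	/* addq $-(15*8), %rsp */
	SAVE_REGS			/* fill pt_regs slots 0*8..14*8 */
	CLEAR_REGS_NOSPEC		/* clearing is still a separate step here */
	ENCODE_FRAME_POINTER		/* per the NOTE: only after the save */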
From 502af0d70843c2a9405d7ba1f79b4b0305aaf5f5 Mon Sep 17 00:00:00 2001
From: Dominik Brodowski
Date: Sun, 11 Feb 2018 11:49:43 +0100
Subject: x86/entry/64: Merge the POP_C_REGS and POP_EXTRA_REGS macros into a
 single POP_REGS macro

The two special, opencoded cases for POP_C_REGS can be handled by ASM
macros.

Signed-off-by: Dominik Brodowski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: dan.j.williams@intel.com
Link: http://lkml.kernel.org/r/20180211104949.12992-3-linux@dominikbrodowski.net
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/calling.h | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 8907a6593b42..3bda31736a7b 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -139,25 +139,32 @@ For 32-bit we have the following conventions - kernel is built with
 	xorq %r15, %r15
 	.endm
 
-	.macro POP_EXTRA_REGS
+	.macro POP_REGS pop_rdi=1 skip_r11rcx=0
 	popq %r15
 	popq %r14
 	popq %r13
 	popq %r12
 	popq %rbp
 	popq %rbx
-	.endm
-
-	.macro POP_C_REGS
+	.if \skip_r11rcx
+	popq %rsi
+	.else
 	popq %r11
+	.endif
 	popq %r10
 	popq %r9
 	popq %r8
 	popq %rax
+	.if \skip_r11rcx
+	popq %rsi
+	.else
 	popq %rcx
+	.endif
 	popq %rdx
 	popq %rsi
+	.if \pop_rdi
 	popq %rdi
+	.endif
 	.endm
 
 	.macro icebp
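The skip_r11rcx=1 variant exists for the SYSRET exit path: SYSRET64 reloads
RIP from %rcx and RFLAGS from %r11 anyway, so those two saved slots only have
to be discarded, not restored. Popping them into %rsi keeps the stack-pointer
arithmetic identical, and the real %rsi is restored by the later 'popq %rsi'
in the sequence. A sketch of the intended use, with the surrounding details
of the actual syscall return path in entry_64.S omitted:

	/* %rcx already holds the return RIP, %r11 the saved RFLAGS */
	POP_REGS pop_rdi=0 skip_r11rcx=1
	/* ... restore %rdi and switch back to the user stack ... */
	sysretq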
From f7bafa2b05ef25eda1d9179fd930b0330cf2b7d1 Mon Sep 17 00:00:00 2001
From: Dominik Brodowski
Date: Sun, 11 Feb 2018 11:49:44 +0100
Subject: x86/entry/64: Interleave XOR register clearing with PUSH instructions

Same as is done for syscalls, interleave XOR with PUSH instructions for
exceptions/interrupts, in order to minimize the cost of the additional
instructions required for register clearing.

Signed-off-by: Dominik Brodowski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: dan.j.williams@intel.com
Link: http://lkml.kernel.org/r/20180211104949.12992-4-linux@dominikbrodowski.net
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/calling.h | 40 +++++++++++++++++++---------------------
 1 file changed, 19 insertions(+), 21 deletions(-)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 3bda31736a7b..a05cbb81268d 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -101,44 +101,42 @@ For 32-bit we have the following conventions - kernel is built with
 	addq $-(15*8), %rsp
 	.endm
 
-	.macro SAVE_REGS offset=0
+	.macro SAVE_AND_CLEAR_REGS offset=0
+	/*
+	 * Save registers and sanitize registers of values that a
+	 * speculation attack might otherwise want to exploit. The
+	 * lower registers are likely clobbered well before they
+	 * could be put to use in a speculative execution gadget.
+	 * Interleave XOR with PUSH for better uop scheduling:
+	 */
 	movq %rdi, 14*8+\offset(%rsp)
 	movq %rsi, 13*8+\offset(%rsp)
 	movq %rdx, 12*8+\offset(%rsp)
 	movq %rcx, 11*8+\offset(%rsp)
 	movq %rax, 10*8+\offset(%rsp)
 	movq %r8, 9*8+\offset(%rsp)
+	xorq %r8, %r8	/* nospec r8 */
 	movq %r9, 8*8+\offset(%rsp)
+	xorq %r9, %r9	/* nospec r9 */
 	movq %r10, 7*8+\offset(%rsp)
+	xorq %r10, %r10	/* nospec r10 */
 	movq %r11, 6*8+\offset(%rsp)
+	xorq %r11, %r11	/* nospec r11 */
 	movq %rbx, 5*8+\offset(%rsp)
+	xorl %ebx, %ebx	/* nospec rbx */
 	movq %rbp, 4*8+\offset(%rsp)
+	xorl %ebp, %ebp	/* nospec rbp */
 	movq %r12, 3*8+\offset(%rsp)
+	xorq %r12, %r12	/* nospec r12 */
 	movq %r13, 2*8+\offset(%rsp)
+	xorq %r13, %r13	/* nospec r13 */
 	movq %r14, 1*8+\offset(%rsp)
+	xorq %r14, %r14	/* nospec r14 */
 	movq %r15, 0*8+\offset(%rsp)
+	xorq %r15, %r15	/* nospec r15 */
 	UNWIND_HINT_REGS offset=\offset
 	.endm
 
-	/*
-	 * Sanitize registers of values that a speculation attack
-	 * might otherwise want to exploit. The lower registers are
-	 * likely clobbered well before they could be put to use in
-	 * a speculative execution gadget:
-	 */
-	.macro CLEAR_REGS_NOSPEC
-	xorl %ebp, %ebp
-	xorl %ebx, %ebx
-	xorq %r8, %r8
-	xorq %r9, %r9
-	xorq %r10, %r10
-	xorq %r11, %r11
-	xorq %r12, %r12
-	xorq %r13, %r13
-	xorq %r14, %r14
-	xorq %r15, %r15
-	.endm
-
 	.macro POP_REGS pop_rdi=1 skip_r11rcx=0
 	popq %r15
 	popq %r14
@@ -177,7 +175,7 @@ For 32-bit we have the following conventions - kernel is built with
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
- * NOTE: This macro must be used *after* SAVE_REGS because it corrupts
+ * NOTE: This macro must be used *after* SAVE_AND_CLEAR_REGS because it corrupts
 * the original rbp.
 */
 .macro ENCODE_FRAME_POINTER ptregs_offset=0
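The interleaving matters because a zeroing XOR is dependency-free and very
cheap on modern out-of-order CPUs - many recognize the idiom at register
rename - so slotting one between adjacent stores hides its cost almost
entirely, whereas a block of ten XORs appended after the stores would bunch
up at the end of the sequence. The combined macro keeps the usage pattern
from before, minus the separate clearing step (again a sketch of a call
site, not literal entry_64.S code):

	/* exception entry at this point in the series */
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_AND_CLEAR_REGS		/* save + clear in one interleaved pass */
	ENCODE_FRAME_POINTER		/* still only after the save; it clobbers %rbp */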
From 3f01daecd545e818098d84fd1ad43e19a508d705 Mon Sep 17 00:00:00 2001
From: Dominik Brodowski
Date: Sun, 11 Feb 2018 11:49:45 +0100
Subject: x86/entry/64: Introduce the PUSH_AND_CLEAR_REGS macro

Those instances where ALLOC_PT_GPREGS_ON_STACK is called just before
SAVE_AND_CLEAR_REGS can trivially be replaced by PUSH_AND_CLEAR_REGS.
This macro uses PUSH instead of MOV and should therefore be faster, at
least on newer CPUs.

Suggested-by: Linus Torvalds
Signed-off-by: Dominik Brodowski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: dan.j.williams@intel.com
Link: http://lkml.kernel.org/r/20180211104949.12992-5-linux@dominikbrodowski.net
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/calling.h | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index a05cbb81268d..57b1b87a04f0 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -137,6 +137,42 @@ For 32-bit we have the following conventions - kernel is built with
 	UNWIND_HINT_REGS offset=\offset
 	.endm
 
+	.macro PUSH_AND_CLEAR_REGS
+	/*
+	 * Push registers and sanitize registers of values that a
+	 * speculation attack might otherwise want to exploit. The
+	 * lower registers are likely clobbered well before they
+	 * could be put to use in a speculative execution gadget.
+	 * Interleave XOR with PUSH for better uop scheduling:
+	 */
+	pushq %rdi	/* pt_regs->di */
+	pushq %rsi	/* pt_regs->si */
+	pushq %rdx	/* pt_regs->dx */
+	pushq %rcx	/* pt_regs->cx */
+	pushq %rax	/* pt_regs->ax */
+	pushq %r8	/* pt_regs->r8 */
+	xorq %r8, %r8	/* nospec r8 */
+	pushq %r9	/* pt_regs->r9 */
+	xorq %r9, %r9	/* nospec r9 */
+	pushq %r10	/* pt_regs->r10 */
+	xorq %r10, %r10	/* nospec r10 */
+	pushq %r11	/* pt_regs->r11 */
+	xorq %r11, %r11	/* nospec r11*/
+	pushq %rbx	/* pt_regs->rbx */
+	xorl %ebx, %ebx	/* nospec rbx*/
+	pushq %rbp	/* pt_regs->rbp */
+	xorl %ebp, %ebp	/* nospec rbp*/
+	pushq %r12	/* pt_regs->r12 */
+	xorq %r12, %r12	/* nospec r12*/
+	pushq %r13	/* pt_regs->r13 */
+	xorq %r13, %r13	/* nospec r13*/
+	pushq %r14	/* pt_regs->r14 */
+	xorq %r14, %r14	/* nospec r14*/
+	pushq %r15	/* pt_regs->r15 */
+	xorq %r15, %r15	/* nospec r15*/
+	UNWIND_HINT_REGS
+	.endm
+
 	.macro POP_REGS pop_rdi=1 skip_r11rcx=0
 	popq %r15
 	popq %r14

From 30907fd13bb593202574bb20af58d67c70a1ee14 Mon Sep 17 00:00:00 2001
From: Dominik Brodowski
Date: Sun, 11 Feb 2018 11:49:46 +0100
Subject: x86/entry/64: Use PUSH_AND_CLEAR_REGS in more cases

entry_SYSCALL_64_after_hwframe() and nmi() can be converted to use
PUSH_AND_CLEAR_REGS instead of opencoded variants thereof. Due to the
interleaving, the additional XOR-based clearing of R8 and R9 in
entry_SYSCALL_64_after_hwframe() should not have any noticeable negative
implications.

Suggested-by: Linus Torvalds
Signed-off-by: Dominik Brodowski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: dan.j.williams@intel.com
Link: http://lkml.kernel.org/r/20180211104949.12992-6-linux@dominikbrodowski.net
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/calling.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 57b1b87a04f0..d6a97e2945ee 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -137,7 +137,7 @@ For 32-bit we have the following conventions - kernel is built with
 	UNWIND_HINT_REGS offset=\offset
 	.endm
 
-	.macro PUSH_AND_CLEAR_REGS
+	.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
 	/*
 	 * Push registers and sanitize registers of values that a
 	 * speculation attack might otherwise want to exploit. The
@@ -147,9 +147,9 @@ For 32-bit we have the following conventions - kernel is built with
 	 */
 	pushq %rdi	/* pt_regs->di */
 	pushq %rsi	/* pt_regs->si */
-	pushq %rdx	/* pt_regs->dx */
+	pushq \rdx	/* pt_regs->dx */
 	pushq %rcx	/* pt_regs->cx */
-	pushq %rax	/* pt_regs->ax */
+	pushq \rax	/* pt_regs->ax */
 	pushq %r8	/* pt_regs->r8 */
 	xorq %r8, %r8	/* nospec r8 */
 	pushq %r9	/* pt_regs->r9 */
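The \rdx and \rax arguments let a call site push something other than the
live register value into the corresponding pt_regs slot. Per the entry_64.S
side of this patch (not reproduced here), the syscall path stores a
preliminary -ENOSYS return value and the NMI path stores the value that
%rdx points at; roughly:

	/* syscall entry: pt_regs->ax = -ENOSYS until the syscall runs */
	PUSH_AND_CLEAR_REGS rax=$-ENOSYS

	/* NMI entry: pt_regs->dx = the value saved at (%rdx) */
	PUSH_AND_CLEAR_REGS rdx=(%rdx)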
From dde3036d62ba3375840b10ab9ec0d568fd773b07 Mon Sep 17 00:00:00 2001
From: Dominik Brodowski
Date: Sun, 11 Feb 2018 11:49:47 +0100
Subject: x86/entry/64: Get rid of the ALLOC_PT_GPREGS_ON_STACK and
 SAVE_AND_CLEAR_REGS macros

Previously, error_entry() and paranoid_entry() saved the GP registers
onto stack space previously allocated by their callers. Combine these two
steps in the callers, and use the generic PUSH_AND_CLEAR_REGS macro for
that.

This adds a significant amount of text size. However, Ingo Molnar points
out that:

   "these numbers also _very_ significantly over-represent the extra
    footprint. The assumptions that resulted in us compressing the IRQ
    entry code have changed very significantly with the new x86 IRQ
    allocation code we introduced in the last year:

    - IRQ vectors are usually populated in tightly clustered groups.
      With our new vector allocator code the typical per CPU allocation
      percentage on x86 systems is ~3 device vectors and ~10 fixed
      vectors out of ~220 vectors - i.e. a very low ~6% utilization (!).
      [...]

      The days where we allocated a lot of vectors on every CPU and the
      compression of the IRQ entry code text mattered are over.

    - Another issue is that only a small minority of vectors is frequent
      enough to actually matter to cache utilization in practice: 3-4
      key IPIs and 1-2 device IRQs at most - and those vectors tend to
      be tightly clustered as well into about two groups, and are
      probably already on 2-3 cache lines in practice.

      For the common case of 'cache cold' IRQs it's the depth of the
      call chain and the fragmentation of the resulting I$ that should
      be the main performance limit - not the overall size of it.

    - The CPU side cost of IRQ delivery is still very expensive even in
      the best, most cached case, as in 'over a thousand cycles'. So
      much stuff is done that maybe contemporary x86 IRQ entry microcode
      already prefetches the IDT entry and its expected call target
      address."[*]

[*] http://lkml.kernel.org/r/20180208094710.qnjixhm6hybebdv7@gmail.com

The "testb $3, CS(%rsp)" instruction in the idtentry macro does not need
modification. Previously, %rsp was manually decreased by 15*8; with this
patch, %rsp is decreased by 15 pushq instructions.

[jpoimboe@redhat.com: unwind hint improvements]
Suggested-by: Linus Torvalds
Signed-off-by: Dominik Brodowski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: dan.j.williams@intel.com
Link: http://lkml.kernel.org/r/20180211104949.12992-7-linux@dominikbrodowski.net
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/calling.h | 42 +-----------------------------------------
 1 file changed, 1 insertion(+), 41 deletions(-)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index d6a97e2945ee..59675010c9a0 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -97,46 +97,6 @@ For 32-bit we have the following conventions - kernel is built with
 
 #define SIZEOF_PTREGS	21*8
 
-	.macro ALLOC_PT_GPREGS_ON_STACK
-	addq $-(15*8), %rsp
-	.endm
-
-	.macro SAVE_AND_CLEAR_REGS offset=0
-	/*
-	 * Save registers and sanitize registers of values that a
-	 * speculation attack might otherwise want to exploit. The
-	 * lower registers are likely clobbered well before they
-	 * could be put to use in a speculative execution gadget.
-	 * Interleave XOR with PUSH for better uop scheduling:
-	 */
-	movq %rdi, 14*8+\offset(%rsp)
-	movq %rsi, 13*8+\offset(%rsp)
-	movq %rdx, 12*8+\offset(%rsp)
-	movq %rcx, 11*8+\offset(%rsp)
-	movq %rax, 10*8+\offset(%rsp)
-	movq %r8, 9*8+\offset(%rsp)
-	xorq %r8, %r8	/* nospec r8 */
-	movq %r9, 8*8+\offset(%rsp)
-	xorq %r9, %r9	/* nospec r9 */
-	movq %r10, 7*8+\offset(%rsp)
-	xorq %r10, %r10	/* nospec r10 */
-	movq %r11, 6*8+\offset(%rsp)
-	xorq %r11, %r11	/* nospec r11 */
-	movq %rbx, 5*8+\offset(%rsp)
-	xorl %ebx, %ebx	/* nospec rbx */
-	movq %rbp, 4*8+\offset(%rsp)
-	xorl %ebp, %ebp	/* nospec rbp */
-	movq %r12, 3*8+\offset(%rsp)
-	xorq %r12, %r12	/* nospec r12 */
-	movq %r13, 2*8+\offset(%rsp)
-	xorq %r13, %r13	/* nospec r13 */
-	movq %r14, 1*8+\offset(%rsp)
-	xorq %r14, %r14	/* nospec r14 */
-	movq %r15, 0*8+\offset(%rsp)
-	xorq %r15, %r15	/* nospec r15 */
-	UNWIND_HINT_REGS offset=\offset
-	.endm
-
 	.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
 	/*
 	 * Push registers and sanitize registers of values that a
@@ -211,7 +171,7 @@ For 32-bit we have the following conventions - kernel is built with
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
- * NOTE: This macro must be used *after* SAVE_AND_CLEAR_REGS because it corrupts
+ * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts
 * the original rbp.
 */
 .macro ENCODE_FRAME_POINTER ptregs_offset=0
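For reference, the ENCODE_FRAME_POINTER macro that the updated NOTE keeps
pointing at is the "sneaky trick" described in that comment: it makes %rbp
point at the pt_regs area with the low bit set - an address no real
(8-byte-aligned) stack frame can have, which is how the unwinder tells the
two apart. With frame pointers enabled its body boils down to a single LEA
(and it reduces to a no-op in !CONFIG_FRAME_POINTER builds), roughly:

.macro ENCODE_FRAME_POINTER ptregs_offset=0
	leaq 1+\ptregs_offset(%rsp), %rbp	/* %rbp = &pt_regs + 1, LSB as marker */
.endm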
From 92816f571af81e9a71cc6f3dc8ce1e2fcdf7b6b8 Mon Sep 17 00:00:00 2001
From: Dominik Brodowski
Date: Sun, 11 Feb 2018 11:49:48 +0100
Subject: x86/entry/64: Indent PUSH_AND_CLEAR_REGS and POP_REGS properly

... same as the other macros in arch/x86/entry/calling.h

Signed-off-by: Dominik Brodowski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: dan.j.williams@intel.com
Link: http://lkml.kernel.org/r/20180211104949.12992-8-linux@dominikbrodowski.net
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/calling.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 59675010c9a0..6985440c68fa 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -97,7 +97,7 @@ For 32-bit we have the following conventions - kernel is built with
 
 #define SIZEOF_PTREGS	21*8
 
-	.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
+.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
 	/*
 	 * Push registers and sanitize registers of values that a
@@ -131,9 +131,9 @@ For 32-bit we have the following conventions - kernel is built with
 	pushq %r15	/* pt_regs->r15 */
 	xorq %r15, %r15	/* nospec r15*/
 	UNWIND_HINT_REGS
-	.endm
+.endm
 
-	.macro POP_REGS pop_rdi=1 skip_r11rcx=0
+.macro POP_REGS pop_rdi=1 skip_r11rcx=0
 	popq %r15
 	popq %r14
 	popq %r13
@@ -163,7 +163,7 @@ For 32-bit we have the following conventions - kernel is built with
 
 	.macro icebp
 	.byte 0xf1
-	.endm
+.endm
 
 /*
  * This is a sneaky trick to help the unwinder find pt_regs on the stack. The

From b498c261107461d5c42140dfddd05df83d8ca078 Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Mon, 12 Feb 2018 21:13:18 +0100
Subject: x86/entry/64: Remove the unused 'icebp' macro

That macro was touched around 2.5.8 times, judging by the full history
linux repo, but it was unused even then.
Get rid of it already.

Signed-off-by: Borislav Petkov
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux@dominikbrodowski.net
Link: http://lkml.kernel.org/r/20180212201318.GD14640@pd.tnic
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/calling.h | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 6985440c68fa..dce7092ab24a 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -159,10 +159,6 @@ For 32-bit we have the following conventions - kernel is built with
 	.if \pop_rdi
 	popq %rdi
 	.endif
-	.endm
-
-	.macro icebp
-	.byte 0xf1
 .endm
 
 /*
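A closing note on what was removed: 0xf1 is the undocumented ICEBP/INT1
opcode, which raises a debug exception (#DB, vector 1) much like a hardware
breakpoint, and which in-circuit emulators historically used; the .byte
encoding was needed because older assemblers have no mnemonic for it. Should
anyone ever need it again, the equivalent can be reintroduced locally:

	.macro icebp
	.byte 0xf1	/* ICEBP/INT1: raises #DB, vector 1 */
	.endm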