From e8025bab7bfbd48d262c01c26c15a9d465fbb083 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 24 Apr 2019 12:34:46 -0400 Subject: function_graph: Place ftrace_graph_entry_stub() prototype in include/linux/ftrace.h ftrace_graph_entry_stub() is defined in generic code; its prototype should live in the generic header rather than be redeclared throughout architecture-specific code by everything that uses it. Cc: Greentime Hu Cc: Vincent Chen Cc: "James E.J. Bottomley" Cc: Helge Deller Cc: linux-parisc@vger.kernel.org Signed-off-by: Steven Rostedt (VMware) --- arch/nds32/kernel/ftrace.c | 1 - arch/parisc/kernel/ftrace.c | 1 - 2 files changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c index 8a41372551ff..fd2a54b8cd57 100644 --- a/arch/nds32/kernel/ftrace.c +++ b/arch/nds32/kernel/ftrace.c @@ -7,7 +7,6 @@ #ifndef CONFIG_DYNAMIC_FTRACE extern void (*ftrace_trace_function)(unsigned long, unsigned long, struct ftrace_ops*, struct pt_regs*); -extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); extern void ftrace_graph_caller(void); noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip, diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index e46a4157a894..a28f915993b1 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c @@ -51,7 +51,6 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent, unsigned long org_sp_gr3) { extern ftrace_func_t ftrace_trace_function; /* depends on CONFIG_DYNAMIC_FTRACE */ - extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); if (ftrace_trace_function != ftrace_stub) { /* struct ftrace_ops *op, struct pt_regs *regs); */ -- cgit v1.2.3 From 2700fefdb2d9751c416ad56897e27d41e409324a Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Fri, 30 Nov 2018 12:39:17 -0600 Subject: x86_64: Add gap to int3 to allow for call emulation To allow an int3 handler to emulate a call instruction, it must be able to push a return address onto the stack. Add a gap to the stack to allow the int3 handler to push the return address and change the return from int3 to jump straight to the target of the emulated call. Link: http://lkml.kernel.org/r/20181130183917.hxmti5josgq4clti@treble Link: http://lkml.kernel.org/r/20190502162133.GX2623@hirez.programming.kicks-ass.net [ Note, this is needed to allow Live Kernel Patching to not miss calling a patched function when tracing is enabled. -- Steven Rostedt ] Cc: stable@vger.kernel.org Fixes: b700e7f03df5 ("livepatch: kernel: add support for live patching") Tested-by: Nicolai Stange Reviewed-by: Nicolai Stange Reviewed-by: Masami Hiramatsu Signed-off-by: Josh Poimboeuf Signed-off-by: Steven Rostedt (VMware) --- arch/x86/entry/entry_64.S | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 1f0efdb7b629..27fcc6fbdd52 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -879,7 +879,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt * @paranoid == 2 is special: the stub will never switch stacks. This is for * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS. 
*/ -.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 +.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0 ENTRY(\sym) UNWIND_HINT_IRET_REGS offset=\has_error_code*8 @@ -899,6 +899,20 @@ ENTRY(\sym) jnz .Lfrom_usermode_switch_stack_\@ .endif + .if \create_gap == 1 + /* + * If coming from kernel space, create a 6-word gap to allow the + * int3 handler to emulate a call instruction. + */ + testb $3, CS-ORIG_RAX(%rsp) + jnz .Lfrom_usermode_no_gap_\@ + .rept 6 + pushq 5*8(%rsp) + .endr + UNWIND_HINT_IRET_REGS offset=8 +.Lfrom_usermode_no_gap_\@: + .endif + .if \paranoid call paranoid_entry .else @@ -1130,7 +1144,7 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \ #endif /* CONFIG_HYPERV */ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK -idtentry int3 do_int3 has_error_code=0 +idtentry int3 do_int3 has_error_code=0 create_gap=1 idtentry stack_segment do_stack_segment has_error_code=1 #ifdef CONFIG_XEN_PV -- cgit v1.2.3 From 4b33dadf37666c0860b88f9e52a16d07bf6d0b03 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 1 May 2019 15:11:17 +0200 Subject: x86_64: Allow breakpoints to emulate call instructions In order to allow breakpoints to emulate call instructions, they need to push the return address onto the stack. The x86_64 int3 handler adds a small gap to allow the stack to grow. Use this gap to push the return address, making it possible to emulate a call instruction at the breakpoint location. These helper functions are added: int3_emulate_jmp(): changes regs->ip so that the int3 returns to the given location. (The next two are only for x86_64.) int3_emulate_push(): pushes a value onto the gap in the stack. int3_emulate_call(): pushes the return address and changes regs->ip. Cc: Andy Lutomirski Cc: Nicolai Stange Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: "H. Peter Anvin" Cc: the arch/x86 maintainers Cc: Josh Poimboeuf Cc: Jiri Kosina Cc: Miroslav Benes Cc: Petr Mladek Cc: Joe Lawrence Cc: Shuah Khan Cc: Konrad Rzeszutek Wilk Cc: Tim Chen Cc: Sebastian Andrzej Siewior Cc: Mimi Zohar Cc: Juergen Gross Cc: Nick Desaulniers Cc: Nayna Jain Cc: Masahiro Yamada Cc: Joerg Roedel Cc: "open list:KERNEL SELFTEST FRAMEWORK" Cc: stable@vger.kernel.org Fixes: b700e7f03df5 ("livepatch: kernel: add support for live patching") Tested-by: Nicolai Stange Reviewed-by: Nicolai Stange Reviewed-by: Masami Hiramatsu Signed-off-by: Peter Zijlstra (Intel) [ Modified to only work for x86_64 and added comment to int3_emulate_push() ] Signed-off-by: Steven Rostedt (VMware) --- arch/x86/include/asm/text-patching.h | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h index e85ff65c43c3..05861cc08787 100644 --- a/arch/x86/include/asm/text-patching.h +++ b/arch/x86/include/asm/text-patching.h @@ -39,4 +39,32 @@ extern int poke_int3_handler(struct pt_regs *regs); extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); extern int after_bootmem; +static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip) +{ + regs->ip = ip; +} + +#define INT3_INSN_SIZE 1 +#define CALL_INSN_SIZE 5 + +#ifdef CONFIG_X86_64 +static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val) +{ + /* + * The int3 handler in entry_64.S adds a gap between the + * stack where the break point happened, and the saving of + * pt_regs. We can extend the original stack because of + * this gap. 
See the idtentry macro's create_gap option. + */ + regs->sp -= sizeof(unsigned long); + *(unsigned long *)regs->sp = val; +} + +static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func) +{ + int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE); + int3_emulate_jmp(regs, func); +} +#endif + #endif /* _ASM_X86_TEXT_PATCHING_H */ -- cgit v1.2.3 From 9e298e8604088a600d8100a111a532a9d342af09 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 1 May 2019 15:11:17 +0200 Subject: ftrace/x86_64: Emulate call function while updating in breakpoint handler Nicolai Stange discovered[1] that if live kernel patching is enabled, and the function tracer started tracing the same function that was patched, the conversion of the fentry call site, during the transition from calling the live kernel patch trampoline to calling the iterator trampoline, would have a slight window where it didn't call anything. Because live kernel patching depends on ftrace always calling its code (to prevent the function being traced from being called directly, as ftrace redirects it), this small window would allow the old buggy function to be called, and this can cause undesirable results. Nicolai submitted new patches[2], but these were controversial, as the problem is similar to the static call emulation issues that came up a while ago[3]. After some debate[4][5], it was settled that adding a gap in the stack when entering the breakpoint handler allows the return address to be pushed onto the stack, making it easy to emulate a call. [1] http://lkml.kernel.org/r/20180726104029.7736-1-nstange@suse.de [2] http://lkml.kernel.org/r/20190427100639.15074-1-nstange@suse.de [3] http://lkml.kernel.org/r/3cf04e113d71c9f8e4be95fb84a510f085aa4afa.1541711457.git.jpoimboe@redhat.com [4] http://lkml.kernel.org/r/CAHk-=wh5OpheSU8Em_Q3Hg8qw_JtoijxOdPtHru6d+5K8TWM=A@mail.gmail.com [5] http://lkml.kernel.org/r/CAHk-=wjvQxY4DvPrJ6haPgAa6b906h=MwZXO6G8OtiTGe=N7_w@mail.gmail.com [ Live kernel patching is not implemented on x86_32, thus the emulate calls are only for x86_64. ] Cc: Andy Lutomirski Cc: Nicolai Stange Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: "H. 
Peter Anvin" Cc: the arch/x86 maintainers Cc: Josh Poimboeuf Cc: Jiri Kosina Cc: Miroslav Benes Cc: Petr Mladek Cc: Joe Lawrence Cc: Shuah Khan Cc: Konrad Rzeszutek Wilk Cc: Tim Chen Cc: Sebastian Andrzej Siewior Cc: Mimi Zohar Cc: Juergen Gross Cc: Nick Desaulniers Cc: Nayna Jain Cc: Masahiro Yamada Cc: Joerg Roedel Cc: "open list:KERNEL SELFTEST FRAMEWORK" Cc: stable@vger.kernel.org Fixes: b700e7f03df5 ("livepatch: kernel: add support for live patching") Tested-by: Nicolai Stange Reviewed-by: Nicolai Stange Reviewed-by: Masami Hiramatsu Signed-off-by: Peter Zijlstra (Intel) [ Changed to only implement emulated calls for x86_64 ] Signed-off-by: Steven Rostedt (VMware) --- arch/x86/kernel/ftrace.c | 32 +++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index ef49517f6bb2..bd553b3af22e 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -29,6 +29,7 @@ #include #include #include +#include <asm/text-patching.h> #ifdef CONFIG_DYNAMIC_FTRACE @@ -231,6 +232,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, } static unsigned long ftrace_update_func; +static unsigned long ftrace_update_func_call; static int update_ftrace_func(unsigned long ip, void *new) { @@ -259,6 +261,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func) unsigned char *new; int ret; + ftrace_update_func_call = (unsigned long)func; + new = ftrace_call_replace(ip, (unsigned long)func); ret = update_ftrace_func(ip, new); @@ -294,13 +298,28 @@ int ftrace_int3_handler(struct pt_regs *regs) if (WARN_ON_ONCE(!regs)) return 0; - ip = regs->ip - 1; - if (!ftrace_location(ip) && !is_ftrace_caller(ip)) - return 0; + ip = regs->ip - INT3_INSN_SIZE; - regs->ip += MCOUNT_INSN_SIZE - 1; +#ifdef CONFIG_X86_64 + if (ftrace_location(ip)) { + int3_emulate_call(regs, (unsigned long)ftrace_regs_caller); + return 1; + } else if (is_ftrace_caller(ip)) { + if (!ftrace_update_func_call) { + int3_emulate_jmp(regs, ip + CALL_INSN_SIZE); + return 1; + } + int3_emulate_call(regs, ftrace_update_func_call); + return 1; + } +#else + if (ftrace_location(ip) || is_ftrace_caller(ip)) { + int3_emulate_jmp(regs, ip + CALL_INSN_SIZE); + return 1; + } +#endif - return 1; + return 0; } NOKPROBE_SYMBOL(ftrace_int3_handler); @@ -859,6 +878,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops) func = ftrace_ops_get_func(ops); + ftrace_update_func_call = (unsigned long)func; + /* Do a safe modify in case the trampoline is executing */ new = ftrace_call_replace(ip, (unsigned long)func); ret = update_ftrace_func(ip, new); @@ -960,6 +981,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func) { unsigned char *new; + ftrace_update_func_call = 0UL; new = ftrace_jmp_replace(ip, (unsigned long)func); return update_ftrace_func(ip, new); -- cgit v1.2.3 From 518049d9d3e25dcd7be6e3d728e86f292ad0c922 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Fri, 10 May 2019 12:05:46 -0400 Subject: ftrace/x86_32: Remove support for non DYNAMIC_FTRACE When DYNAMIC_FTRACE is enabled in the kernel, all the functions that can be traced by the function tracer have a "nop" placeholder at the start of the function. When function tracing is enabled, the nop is converted into a call to the tracing infrastructure where the functions get traced. This also allows for selecting specific functions to trace, and a lot of infrastructure is built on top of this. 
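[ For illustration only — a minimal sketch of the kind of infrastructure built on top of DYNAMIC_FTRACE, not part of this commit. The traced function name and the empty callback body are made up for the example; ftrace_set_filter() and register_ftrace_function() are the existing ftrace interfaces of this era:

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/string.h>

/* Runs via the call patched over the nop of each filtered function */
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* ip is the traced function, parent_ip is its caller */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};

static char func_name[] = "do_sys_open";	/* arbitrary example target */

static int __init my_trace_init(void)
{
	/* Convert only this function's nop placeholder into a call */
	ftrace_set_filter(&my_ops, func_name, strlen(func_name), 0);
	return register_ftrace_function(&my_ops);
}
module_init(my_trace_init);

static void __exit my_trace_exit(void)
{
	unregister_ftrace_function(&my_ops);
}
module_exit(my_trace_exit);

MODULE_LICENSE("GPL"); ]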
When DYNAMIC_FTRACE is not enabled, all the functions have a call to the ftrace trampoline. A check is made to see if the function pointer is ftrace_stub or not, and if it is not, the function pointer is called to trace the code. This adds over 10% overhead to the kernel even when tracing is disabled. When an architecture supports DYNAMIC_FTRACE, there really is no reason to use the static tracing. I have kept non DYNAMIC_FTRACE available in x86 so that the generic code for non DYNAMIC_FTRACE can be tested. There is no reason to support non DYNAMIC_FTRACE for both x86_64 and x86_32. As the non DYNAMIC_FTRACE for x86_32 does not even support fentry, and we want to remove mcount completely, there's no reason to keep non DYNAMIC_FTRACE around for x86_32. Signed-off-by: Steven Rostedt (VMware) --- arch/x86/Kconfig | 11 +++++++++++ arch/x86/kernel/ftrace_32.S | 39 --------------------------------------- 2 files changed, 11 insertions(+), 39 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5ad92419be19..0544041ae3a2 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -31,6 +31,17 @@ config X86_64 select X86_DEV_DMA_OPS select ARCH_HAS_SYSCALL_WRAPPER +config FORCE_DYNAMIC_FTRACE + def_bool y + depends on X86_32 + depends on FUNCTION_TRACER + select DYNAMIC_FTRACE + help + We keep the static function tracing (!DYNAMIC_FTRACE) around + in order to test the non static function tracing in the + generic code, as other architectures still use it. But we + only need to keep it around for x86_64. No need to keep it + for x86_32. For x86_32, force DYNAMIC_FTRACE. # # Arch settings # diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S index 4c8440de3355..459e6b4a19bc 100644 --- a/arch/x86/kernel/ftrace_32.S +++ b/arch/x86/kernel/ftrace_32.S @@ -18,8 +18,6 @@ EXPORT_SYMBOL(__fentry__) EXPORT_SYMBOL(mcount) #endif -#ifdef CONFIG_DYNAMIC_FTRACE - /* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */ #if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER) # define USING_FRAME_POINTER @@ -170,43 +168,6 @@ GLOBAL(ftrace_regs_call) lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */ jmp .Lftrace_ret -#else /* ! CONFIG_DYNAMIC_FTRACE */ - -ENTRY(function_hook) - cmpl $__PAGE_OFFSET, %esp - jb ftrace_stub /* Paging not enabled yet? */ - - cmpl $ftrace_stub, ftrace_trace_function - jnz .Ltrace #ifdef CONFIG_FUNCTION_GRAPH_TRACER - cmpl $ftrace_stub, ftrace_graph_return - jnz ftrace_graph_caller - - cmpl $ftrace_graph_entry_stub, ftrace_graph_entry - jnz ftrace_graph_caller -#endif .globl ftrace_stub ftrace_stub: ret - /* taken from glibc */ .Ltrace: - pushl %eax - pushl %ecx - pushl %edx - movl 0xc(%esp), %eax - movl 0x4(%ebp), %edx - subl $MCOUNT_INSN_SIZE, %eax - - movl ftrace_trace_function, %ecx - CALL_NOSPEC %ecx - - popl %edx - popl %ecx - popl %eax - jmp ftrace_stub -END(function_hook) -#endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) -- cgit v1.2.3 From 562e14f72292249e52e6346a9e3a30be652b0cf6 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 9 May 2019 15:32:05 -0400 Subject: ftrace/x86: Remove mcount support There are two methods of enabling function tracing in Linux on x86. One is with just "gcc -pg" and the other is "gcc -pg -mfentry". The former will use calls to a special function "mcount" after the frame is set up in all C functions. 
The latter will add calls to a special function called "fentry" as the very first instruction of all C functions. At compile time, there is a check to see if gcc supports -mfentry, and if it does, it will use that, because it is more versatile and less error-prone for function tracing. Starting with v4.19, the minimum gcc version supported to build the Linux kernel was raised to 4.6. That also happens to be the first gcc version to support -mfentry. Since gcc versions 4.6 and beyond unconditionally enable -mfentry on x86, the kernel will no longer use mcount as the method for inserting calls into its C functions. This means that there is no point in continuing to maintain mcount on x86. Remove support for using mcount. This makes the code less complex, and will also allow it to be simplified in the future. Acked-by: Peter Zijlstra (Intel) Acked-by: Jiri Kosina Acked-by: Josh Poimboeuf Signed-off-by: Steven Rostedt (VMware) --- arch/x86/include/asm/ftrace.h | 8 +++----- arch/x86/include/asm/livepatch.h | 3 --- arch/x86/kernel/ftrace_32.S | 36 +++++------------------------------- arch/x86/kernel/ftrace_64.S | 28 +--------------------------- 4 files changed, 9 insertions(+), 66 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index cf350639e76d..287f1f7b2e52 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -3,12 +3,10 @@ #define _ASM_X86_FTRACE_H #ifdef CONFIG_FUNCTION_TRACER -#ifdef CC_USING_FENTRY -# define MCOUNT_ADDR ((unsigned long)(__fentry__)) -#else -# define MCOUNT_ADDR ((unsigned long)(mcount)) -# define HAVE_FUNCTION_GRAPH_FP_TEST #endif +#ifndef CC_USING_FENTRY +# error Compiler does not support fentry? #endif +# define MCOUNT_ADDR ((unsigned long)(__fentry__)) #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h index ed80003ce3e2..2f2bdf0662f8 100644 --- a/arch/x86/include/asm/livepatch.h +++ b/arch/x86/include/asm/livepatch.h @@ -26,9 +26,6 @@ static inline int klp_check_compiler_support(void) { -#ifndef CC_USING_FENTRY - return 1; -#endif return 0; } diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S index 459e6b4a19bc..2ba914a34b06 100644 --- a/arch/x86/kernel/ftrace_32.S +++ b/arch/x86/kernel/ftrace_32.S @@ -10,20 +10,10 @@ #include #include -#ifdef CC_USING_FENTRY # define function_hook __fentry__ EXPORT_SYMBOL(__fentry__) -#else -# define function_hook mcount -EXPORT_SYMBOL(mcount) -#endif - -/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */ -#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER) -# define USING_FRAME_POINTER -#endif -#ifdef USING_FRAME_POINTER +#ifdef CONFIG_FRAME_POINTER # define MCOUNT_FRAME 1 /* using frame = true */ #else # define MCOUNT_FRAME 0 /* using frame = false */ @@ -35,8 +25,7 @@ END(function_hook) ENTRY(ftrace_caller) -#ifdef USING_FRAME_POINTER -# ifdef CC_USING_FENTRY +#ifdef CONFIG_FRAME_POINTER /* * Frame pointers are of ip followed by bp. 
* Since fentry is an immediate jump, we are left with @@ -47,7 +36,7 @@ ENTRY(ftrace_caller) pushl %ebp movl %esp, %ebp pushl 2*4(%esp) /* function ip */ -# endif + /* For mcount, the function ip is directly above */ pushl %ebp movl %esp, %ebp @@ -57,7 +46,7 @@ ENTRY(ftrace_caller) pushl %edx pushl $0 /* Pass NULL as regs pointer */ -#ifdef USING_FRAME_POINTER +#ifdef CONFIG_FRAME_POINTER /* Load parent ebp into edx */ movl 4*4(%esp), %edx #else @@ -80,13 +69,11 @@ ftrace_call: popl %edx popl %ecx popl %eax -#ifdef USING_FRAME_POINTER +#ifdef CONFIG_FRAME_POINTER popl %ebp -# ifdef CC_USING_FENTRY addl $4,%esp /* skip function ip */ popl %ebp /* this is the orig bp */ addl $4, %esp /* skip parent ip */ -# endif #endif .Lftrace_ret: #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -131,11 +118,7 @@ ENTRY(ftrace_regs_caller) movl 12*4(%esp), %eax /* Load ip (1st parameter) */ subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ -#ifdef CC_USING_FENTRY movl 15*4(%esp), %edx /* Load parent ip (2nd parameter) */ -#else - movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ -#endif movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ pushl %esp /* Save pt_regs as 4th parameter */ @@ -176,13 +159,8 @@ ENTRY(ftrace_graph_caller) pushl %edx movl 3*4(%esp), %eax /* Even with frame pointers, fentry doesn't have one here */ -#ifdef CC_USING_FENTRY lea 4*4(%esp), %edx movl $0, %ecx -#else - lea 0x4(%ebp), %edx - movl (%ebp), %ecx -#endif subl $MCOUNT_INSN_SIZE, %eax call prepare_ftrace_return popl %edx @@ -195,11 +173,7 @@ END(ftrace_graph_caller) return_to_handler: pushl %eax pushl %edx -#ifdef CC_USING_FENTRY movl $0, %eax -#else - movl %ebp, %eax -#endif call ftrace_return_to_handler movl %eax, %ecx popl %edx diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index 75f2b36b41a6..10eb2760ef2c 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -13,22 +13,12 @@ .code64 .section .entry.text, "ax" -#ifdef CC_USING_FENTRY # define function_hook __fentry__ EXPORT_SYMBOL(__fentry__) -#else -# define function_hook mcount -EXPORT_SYMBOL(mcount) -#endif #ifdef CONFIG_FRAME_POINTER -# ifdef CC_USING_FENTRY /* Save parent and function stack frames (rip and rbp) */ # define MCOUNT_FRAME_SIZE (8+16*2) -# else -/* Save just function stack frame (rip and rbp) */ -# define MCOUNT_FRAME_SIZE (8+16) -# endif #else /* No need to save a stack frame */ # define MCOUNT_FRAME_SIZE 0 @@ -75,17 +65,13 @@ EXPORT_SYMBOL(mcount) * fentry is called before the stack frame is set up, where as mcount * is called afterward. 
*/ -#ifdef CC_USING_FENTRY + /* Save the parent pointer (skip orig rbp and our return address) */ pushq \added+8*2(%rsp) pushq %rbp movq %rsp, %rbp /* Save the return address (now skip orig rbp, rbp and parent) */ pushq \added+8*3(%rsp) -#else - /* Can't assume that rip is before this (unless added was zero) */ - pushq \added+8(%rsp) -#endif pushq %rbp movq %rsp, %rbp #endif /* CONFIG_FRAME_POINTER */ @@ -113,12 +99,7 @@ EXPORT_SYMBOL(mcount) movq %rdx, RBP(%rsp) /* Copy the parent address into %rsi (second parameter) */ -#ifdef CC_USING_FENTRY movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi -#else - /* %rdx contains original %rbp */ - movq 8(%rdx), %rsi -#endif /* Move RIP to its proper location */ movq MCOUNT_REG_SIZE+\added(%rsp), %rdi @@ -303,15 +284,8 @@ ENTRY(ftrace_graph_caller) /* Saves rbp into %rdx and fills first parameter */ save_mcount_regs -#ifdef CC_USING_FENTRY leaq MCOUNT_REG_SIZE+8(%rsp), %rsi movq $0, %rdx /* No framepointers needed */ -#else - /* Save address of the return address of traced function */ - leaq 8(%rdx), %rsi - /* ftrace does sanity checks against frame pointers */ - movq (%rdx), %rdx -#endif call prepare_ftrace_return restore_mcount_regs -- cgit v1.2.3 From 56e33afd7757d5da2664fb797f2544ce167176be Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Fri, 10 May 2019 23:47:50 +0200 Subject: livepatch: Remove klp_check_compiler_support() The only purpose of klp_check_compiler_support() is to make sure that we are not using ftrace on x86 via mcount (because that's executed only after the prologue has already happened, and that's too late for livepatching purposes). Now that mcount is not supported by ftrace anymore, there is no need for klp_check_compiler_support() either. Link: http://lkml.kernel.org/r/nycvar.YFH.7.76.1905102346100.17054@cbobk.fhfr.pm Reported-by: Linus Torvalds Signed-off-by: Jiri Kosina Signed-off-by: Steven Rostedt (VMware) --- arch/powerpc/include/asm/livepatch.h | 5 ----- arch/s390/include/asm/livepatch.h | 5 ----- arch/x86/include/asm/livepatch.h | 5 ----- 3 files changed, 15 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h index 5070df19d463..c005aee5ea43 100644 --- a/arch/powerpc/include/asm/livepatch.h +++ b/arch/powerpc/include/asm/livepatch.h @@ -24,11 +24,6 @@ #include #ifdef CONFIG_LIVEPATCH -static inline int klp_check_compiler_support(void) -{ - return 0; -} - static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) { regs->nip = ip; diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h index 672f95b12d40..818612b784cd 100644 --- a/arch/s390/include/asm/livepatch.h +++ b/arch/s390/include/asm/livepatch.h @@ -13,11 +13,6 @@ #include -static inline int klp_check_compiler_support(void) -{ - return 0; -} - static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) { regs->psw.addr = ip; diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h index 2f2bdf0662f8..a66f6706c2de 100644 --- a/arch/x86/include/asm/livepatch.h +++ b/arch/x86/include/asm/livepatch.h @@ -24,11 +24,6 @@ #include #include -static inline int klp_check_compiler_support(void) -{ - return 0; -} - static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) { regs->ip = ip; -- cgit v1.2.3 From 693713cbdb3a4bda5a8a678c31f06560bbb14657 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Sat, 11 May 2019 08:32:40 -0400 Subject: x86: Hide the int3_emulate_call/jmp functions from UML User Mode 
Linux does not have access to the ip or sp fields of the pt_regs, and accessing them causes UML to fail to build. Hide the int3_emulate_jmp() and int3_emulate_call() functions from UML, as it doesn't need them anyway. Reported-by: kbuild test robot Signed-off-by: Steven Rostedt (VMware) --- arch/x86/include/asm/text-patching.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h index 05861cc08787..0bbb07eaed6b 100644 --- a/arch/x86/include/asm/text-patching.h +++ b/arch/x86/include/asm/text-patching.h @@ -39,6 +39,7 @@ extern int poke_int3_handler(struct pt_regs *regs); extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); extern int after_bootmem; +#ifndef CONFIG_UML_X86 static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip) { regs->ip = ip; @@ -65,6 +66,7 @@ static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func) int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE); int3_emulate_jmp(regs, func); } -#endif +#endif /* CONFIG_X86_64 */ +#endif /* !CONFIG_UML_X86 */ #endif /* _ASM_X86_TEXT_PATCHING_H */ -- cgit v1.2.3
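[ Taken together, the series enables the pattern below — a minimal sketch, not code from any of the commits above. The handler name, the my_site_is_patched() lookup and the new_func target are hypothetical; int3_emulate_call(), int3_emulate_jmp(), INT3_INSN_SIZE and CALL_INSN_SIZE are the helpers introduced in text-patching.h (x86_64 only), and the gap they rely on is the one created by "idtentry ... create_gap=1":

#include <linux/types.h>
#include <linux/ptrace.h>
#include <asm/text-patching.h>

extern void new_func(void);				/* hypothetical call target */
extern bool my_site_is_patched(unsigned long ip);	/* hypothetical lookup */

/*
 * Called from the int3 trap while a 5-byte call site is mid-update.
 * The site currently holds an int3, so emulate the call that should
 * have executed instead of letting it be skipped, mirroring what
 * ftrace_int3_handler() does above.
 */
static int my_int3_handler(struct pt_regs *regs)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE;	/* start of the patched site */

	if (!my_site_is_patched(ip))
		return 0;	/* not ours; let other handlers look at it */

	/*
	 * int3_emulate_call() pushes the return address (ip + CALL_INSN_SIZE)
	 * into the 6-word gap added by the entry code and redirects regs->ip,
	 * so the emulated call is not lost.
	 */
	int3_emulate_call(regs, (unsigned long)new_func);
	return 1;
}
]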