author     Andy Lutomirski <luto@kernel.org>    2015-10-05 17:48:19 -0700
committer  Ingo Molnar <mingo@kernel.org>       2015-10-09 09:41:12 +0200
commit     33c52129f45e06d9ce23e1a3d50bf9fd6770748b (patch)
tree       7252b2e12e4441966f13a7519649308aef421bcb /arch/x86/entry/common.c
parent     460d12453e1afe20416ce9536cfecb31d17a9abd (diff)
x86/entry: Force inlining of 32-bit syscall code
On systems that support fast syscalls, we only really care about
the performance of the fast syscall path. Forcibly inline it
and add a likely annotation.
This saves 4-6 cycles.
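
[Editor's note: for readers unfamiliar with the two annotations this patch uses, __always_inline and likely() are thin wrappers around GCC builtins. The standalone sketch below mirrors, in simplified form, how the kernel's include/linux/compiler*.h headers define them; the dispatch() helper and NR_SYSCALLS constant are made up purely for illustration and are not part of the patch.]

#include <stdio.h>

/*
 * Simplified stand-ins for the kernel's annotations (see
 * include/linux/compiler*.h); not copied verbatim from any one
 * kernel version.
 */
#define __always_inline inline __attribute__((__always_inline__))
#define likely(x)       __builtin_expect(!!(x), 1)

#define NR_SYSCALLS 400		/* hypothetical stand-in for IA32_NR_syscalls */

/* Hypothetical helper: forced inline, so the compiler cannot outline it. */
static __always_inline int dispatch(unsigned int nr)
{
	/* Hint that the in-range case is the hot path. */
	if (likely(nr < NR_SYSCALLS))
		return 0;	/* fast path: valid syscall number */
	return -1;		/* slow path: out of range */
}

int main(void)
{
	printf("%d\n", dispatch(3));
	return 0;
}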
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/8472036ff1f4b426b4c4c3e3d0b3bf5264407c0c.1444091585.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/entry/common.c')
 -rw-r--r--  arch/x86/entry/common.c | 8
 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 03aacd188458..d5eee851071c 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -324,9 +324,11 @@ __visible void syscall_return_slowpath(struct pt_regs *regs)
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
 /*
  * Does a 32-bit syscall.  Called with IRQs on and does all entry and
- * exit work and returns with IRQs off.
+ * exit work and returns with IRQs off.  This function is extremely hot
+ * in workloads that use it, and it's usually called from
+ * do_fast_syscall_32, so forcibly inline it to improve performance.
  */
-static void do_syscall_32_irqs_on(struct pt_regs *regs)
+static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
 {
 	struct thread_info *ti = pt_regs_to_thread_info(regs);
 	unsigned int nr = (unsigned int)regs->orig_ax;
@@ -345,7 +347,7 @@ static void do_syscall_32_irqs_on(struct pt_regs *regs)
 		nr = syscall_trace_enter(regs);
 	}
 
-	if (nr < IA32_NR_syscalls) {
+	if (likely(nr < IA32_NR_syscalls)) {
 		/*
 		 * It's possible that a 32-bit syscall implementation
 		 * takes a 64-bit parameter but nonetheless assumes that