author		Linus Torvalds <torvalds@linux-foundation.org>	2020-08-04 21:05:46 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-08-04 21:05:46 -0700
commit		125cfa0d4d143416ae217c26a72003baae93233d (patch)
tree		fb29ea571b34bf591789deb70cb6a263ba01bbdf /arch/x86/kernel
parent		3f0d6ecdf1ab35ac54cabb759f748fb0bffd26a5 (diff)
parent		adb334d17858d8b679a41f7f2cd230e5c6accc0a (diff)
Merge tag 'x86-entry-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 conversion to generic entry code from Thomas Gleixner:
 "The conversion of X86 syscall, interrupt and exception entry/exit
  handling to the generic code. Pretty much a straight-forward 1:1
  conversion plus the consolidation of the KVM handling of pending work
  before entering guest mode."

* tag 'x86-entry-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/kvm: Use __xfer_to_guest_mode_work_pending() in kvm_run_vcpu()
  x86/kvm: Use generic xfer to guest work function
  x86/entry: Cleanup idtentry_enter/exit
  x86/entry: Use generic interrupt entry/exit code
  x86/entry: Cleanup idtentry_entry/exit_user
  x86/entry: Use generic syscall exit functionality
  x86/entry: Use generic syscall entry function
  x86/ptrace: Provide pt_regs helper for entry/exit
  x86/entry: Move user return notifier out of loop
  x86/entry: Consolidate 32/64 bit syscall entry
  x86/entry: Consolidate check_user_regs()
  x86: Correct noinstr qualifiers
  x86/idtentry: Remove stale comment
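After this series, the converted 64-bit syscall path reduces to the generic enter/exit helpers. A condensed sketch of do_syscall_64() as merged (arch/x86/entry/common.c; compat and X32 handling omitted):

__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
	nr = syscall_enter_from_user_mode(regs, nr);

	instrumentation_begin();
	if (likely(nr < NR_syscalls)) {
		nr = array_index_nospec(nr, NR_syscalls);
		/* Dispatch through the regular syscall table */
		regs->ax = sys_call_table[nr](regs);
	}
	instrumentation_end();

	syscall_exit_to_user_mode(regs);
}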
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/alternative.c	2
-rw-r--r--	arch/x86/kernel/cpu/mce/core.c	6
-rw-r--r--	arch/x86/kernel/kvm.c	6
-rw-r--r--	arch/x86/kernel/signal.c	3
-rw-r--r--	arch/x86/kernel/traps.c	24
5 files changed, 21 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 3abc1316f91b..c826cddae157 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1047,7 +1047,7 @@ static __always_inline int patch_cmp(const void *key, const void *elt)
return 0;
}
-int noinstr poke_int3_handler(struct pt_regs *regs)
+noinstr int poke_int3_handler(struct pt_regs *regs)
{
struct bp_patching_desc *desc;
struct text_poke_loc *tp;
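The alternative.c hunk belongs to the "x86: Correct noinstr qualifiers" cleanup: like inline, the noinstr annotation is supposed to precede the return type. A minimal illustration with a hypothetical handler name:

/* preferred: qualifier before the return type, as in this series */
noinstr int my_handler(struct pt_regs *regs);

/* the ordering this cleanup removes */
int noinstr my_old_handler(struct pt_regs *regs);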
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index e76c1ddd35e7..f43a78bde670 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1215,7 +1215,7 @@ static void kill_me_maybe(struct callback_head *cb)
* backing the user stack, tracing that reads the user stack will cause
* potentially infinite recursion.
*/
-void noinstr do_machine_check(struct pt_regs *regs)
+noinstr void do_machine_check(struct pt_regs *regs)
{
DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
DECLARE_BITMAP(toclear, MAX_NR_BANKS);
@@ -1930,11 +1930,11 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
static __always_inline void exc_machine_check_user(struct pt_regs *regs)
{
- idtentry_enter_user(regs);
+ irqentry_enter_from_user_mode(regs);
instrumentation_begin();
machine_check_vector(regs);
instrumentation_end();
- idtentry_exit_user(regs);
+ irqentry_exit_to_user_mode(regs);
}
#ifdef CONFIG_X86_64
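The machine-check hunks swap the x86-private idtentry_enter_user()/idtentry_exit_user() pair for the generic irqentry_enter_from_user_mode()/irqentry_exit_to_user_mode(). Unlike irqentry_enter()/irqentry_exit() used below, this pair carries no state token: an entry from user mode always has to establish kernel context, so there is nothing conditional for the exit side to undo. A sketch of the resulting pattern, with a hypothetical handler name:

static noinstr void handle_user_trap(struct pt_regs *regs)
{
	irqentry_enter_from_user_mode(regs);	/* context tracking: user -> kernel */
	instrumentation_begin();		/* tracing/kprobes allowed from here */
	/* ... instrumented handler body ... */
	instrumentation_end();
	irqentry_exit_to_user_mode(regs);	/* run pending work, return to user */
}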
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 3f78482d9496..233c77d056c9 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
u32 reason = kvm_read_and_reset_apf_flags();
- idtentry_state_t state;
+ irqentry_state_t state;
switch (reason) {
case KVM_PV_REASON_PAGE_NOT_PRESENT:
@@ -243,7 +243,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
return false;
}
- state = idtentry_enter(regs);
+ state = irqentry_enter(regs);
instrumentation_begin();
/*
@@ -264,7 +264,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
}
instrumentation_end();
- idtentry_exit(regs, state);
+ irqentry_exit(regs, state);
return true;
}
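The idtentry_state_t to irqentry_state_t rename points at the generic type from <linux/entry-common.h>. As merged in this series it is a small token that irqentry_enter() returns and irqentry_exit() consumes, so the exit path can undo exactly what the entry path had to set up (simplified sketch of the generic definition):

typedef struct irqentry_state {
	bool	exit_rcu;	/* entry had to begin an RCU watching section */
} irqentry_state_t;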
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 399f97abee02..d5fa494c2304 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -25,6 +25,7 @@
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/context_tracking.h>
+#include <linux/entry-common.h>
#include <linux/syscalls.h>
#include <asm/processor.h>
@@ -803,7 +804,7 @@ static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-void do_signal(struct pt_regs *regs)
+void arch_do_signal(struct pt_regs *regs)
{
struct ksignal ksig;
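Renaming do_signal() to arch_do_signal() turns x86's signal delivery into the architecture hook invoked by the generic exit-to-user work loop, which is why <linux/entry-common.h> is now included. A condensed, simplified sketch of that generic loop (kernel/entry/common.c; other TIF bits and interrupt-enable handling omitted):

static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	while (ti_work & EXIT_TO_USER_MODE_WORK) {
		if (ti_work & _TIF_NEED_RESCHED)
			schedule();
		if (ti_work & _TIF_SIGPENDING)
			arch_do_signal(regs);	/* x86's former do_signal() */
		ti_work = READ_ONCE(current_thread_info()->flags);
	}
	return ti_work;
}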
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 8493f55e1167..438fc554d48d 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -245,7 +245,7 @@ static noinstr bool handle_bug(struct pt_regs *regs)
DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
- idtentry_state_t state;
+ irqentry_state_t state;
/*
* We use UD2 as a short encoding for 'CALL __WARN', as such
@@ -255,11 +255,11 @@ DEFINE_IDTENTRY_RAW(exc_invalid_op)
if (!user_mode(regs) && handle_bug(regs))
return;
- state = idtentry_enter(regs);
+ state = irqentry_enter(regs);
instrumentation_begin();
handle_invalid_op(regs);
instrumentation_end();
- idtentry_exit(regs, state);
+ irqentry_exit(regs, state);
}
DEFINE_IDTENTRY(exc_coproc_segment_overrun)
@@ -638,18 +638,18 @@ DEFINE_IDTENTRY_RAW(exc_int3)
return;
/*
- * idtentry_enter_user() uses static_branch_{,un}likely() and therefore
- * can trigger INT3, hence poke_int3_handler() must be done
- * before. If the entry came from kernel mode, then use nmi_enter()
- * because the INT3 could have been hit in any context including
- * NMI.
+ * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
+ * and therefore can trigger INT3, hence poke_int3_handler() must
+ * be done before. If the entry came from kernel mode, then use
+ * nmi_enter() because the INT3 could have been hit in any context
+ * including NMI.
*/
if (user_mode(regs)) {
- idtentry_enter_user(regs);
+ irqentry_enter_from_user_mode(regs);
instrumentation_begin();
do_int3_user(regs);
instrumentation_end();
- idtentry_exit_user(regs);
+ irqentry_exit_to_user_mode(regs);
} else {
bool irq_state = idtentry_enter_nmi(regs);
instrumentation_begin();
@@ -895,13 +895,13 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
*/
WARN_ON_ONCE(!user_mode(regs));
- idtentry_enter_user(regs);
+ irqentry_enter_from_user_mode(regs);
instrumentation_begin();
handle_debug(regs, dr6, true);
instrumentation_end();
- idtentry_exit_user(regs);
+ irqentry_exit_to_user_mode(regs);
}
#ifdef CONFIG_X86_64
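The two KVM items at the top of the merge message land outside this diffstat, in arch/x86/kvm: before entering guest mode, pending work is now handed to the generic helpers from <linux/entry-kvm.h> instead of open-coded x86 checks. A hypothetical condensation of the host-side run-loop change:

static int vcpu_enter_guest_sketch(struct kvm_vcpu *vcpu)
{
	/* Cheap check usable right before the actual VM-enter */
	if (__xfer_to_guest_mode_work_pending())
		return xfer_to_guest_mode_handle_work(vcpu);	/* 0 or -errno */

	/* ... actually enter guest mode ... */
	return 0;
}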