author     Yang Jihong <yangjihong1@huawei.com>             2023-02-21 08:49:16 +0900
committer  Masami Hiramatsu (Google) <mhiramat@kernel.org>  2023-02-21 08:49:16 +0900
commit     868a6fc0ca2407622d2833adefe1c4d284766c4c (patch)
tree       0252e31d0dd357bf8a560f0c7e72698957256c8a
parent     4fbd2f83fda0ca44a2ec6421ca3508b355b31858 (diff)
x86/kprobes: Fix __recover_optprobed_insn check optimizing logic
Since the following commit:

  commit f66c0447cca1 ("kprobes: Set unoptimized flag after unoptimizing code")

changed the update timing of KPROBE_FLAG_OPTIMIZED, an optimized_kprobe may be in either the optimizing or the unoptimizing state while op.kp->flags has KPROBE_FLAG_OPTIMIZED set and op->list is not empty.

This makes the check logic in __recover_optprobed_insn incorrect: a kprobe queued for unoptimization still looks like one that is being optimized, so its saved instruction buffer is wrongly skipped and incorrect instructions are copied.

The optprobe_queued_unopt() function needs to be exported so that it can be invoked from the arch directory.

Link: https://lore.kernel.org/all/20230216034247.32348-2-yangjihong1@huawei.com/

Fixes: f66c0447cca1 ("kprobes: Set unoptimized flag after unoptimizing code")
Cc: stable@vger.kernel.org
Signed-off-by: Yang Jihong <yangjihong1@huawei.com>
Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
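To make the three states concrete, here is a minimal user-space model of the corrected check. This is a sketch only: model_op, on_list, queued_unopt, and buffer_usable() are illustrative stand-ins, not kernel identifiers; on_list models !list_empty(&op->list) and queued_unopt models optprobe_queued_unopt(op).

/* Minimal user-space model of the corrected recovery check (sketch). */
#include <stdbool.h>
#include <stdio.h>

struct model_op {
        bool flag_optimized; /* KPROBE_FLAG_OPTIMIZED is set */
        bool on_list;        /* op is queued on a transition list */
        bool queued_unopt;   /* ...specifically the unoptimizing list */
};

/* May the saved copied_insn buffer be used to recover instructions? */
static bool buffer_usable(const struct model_op *op)
{
        if (!op->flag_optimized)
                return false;
        /*
         * Fully optimized, or still optimized while queued for
         * unoptimization: either way the jump is in place and the
         * buffer holds the original instructions.
         */
        return !op->on_list || op->queued_unopt;
}

int main(void)
{
        struct model_op optimized    = { true, false, false };
        struct model_op optimizing   = { true, true,  false };
        struct model_op unoptimizing = { true, true,  true  };

        printf("optimized:    %d\n", buffer_usable(&optimized));    /* 1 */
        printf("optimizing:   %d\n", buffer_usable(&optimizing));   /* 0 */
        /* The old list_empty()-only check wrongly returned 0 here: */
        printf("unoptimizing: %d\n", buffer_usable(&unoptimizing)); /* 1 */
        return 0;
}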
-rw-r--r--   arch/x86/kernel/kprobes/opt.c   4
-rw-r--r--   include/linux/kprobes.h         1
-rw-r--r--   kernel/kprobes.c                2
3 files changed, 4 insertions, 3 deletions
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index e57e07b0edb6..f406bfa9a8cd 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -46,8 +46,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
                /* This function only handles jump-optimized kprobe */
                if (kp && kprobe_optimized(kp)) {
                        op = container_of(kp, struct optimized_kprobe, kp);
-                       /* If op->list is not empty, op is under optimizing */
-                       if (list_empty(&op->list))
+                       /* If op is optimized or under unoptimizing */
+                       if (list_empty(&op->list) || optprobe_queued_unopt(op))
                                goto found;
                }
        }
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index a0b92be98984..ab39285f71a6 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -378,6 +378,7 @@ extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
 DEFINE_INSN_CACHE_OPS(optinsn);

 extern void wait_for_kprobe_optimizer(void);
+bool optprobe_queued_unopt(struct optimized_kprobe *op);
 #else /* !CONFIG_OPTPROBES */
 static inline void wait_for_kprobe_optimizer(void) { }
 #endif /* CONFIG_OPTPROBES */
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6b6aff00b3b6..55e1807ca054 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -660,7 +660,7 @@ void wait_for_kprobe_optimizer(void)
        mutex_unlock(&kprobe_mutex);
 }

-static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+bool optprobe_queued_unopt(struct optimized_kprobe *op)
 {
        struct optimized_kprobe *_op;
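The last hunk is truncated above. For context, optprobe_queued_unopt() reports whether op is currently queued for unoptimization. Below is a sketch of its body, reconstructed from upstream kernel/kprobes.c rather than taken from this page's diff; unoptimizing_list is the optimizer's queue of probes awaiting unoptimization, and list_for_each_entry is the standard kernel list iterator.

/* Sketch: is op queued on the optimizer's unoptimizing list? */
bool optprobe_queued_unopt(struct optimized_kprobe *op)
{
        struct optimized_kprobe *_op;

        /* Scan the probes queued for unoptimization. */
        list_for_each_entry(_op, &unoptimizing_list, list) {
                if (op == _op)
                        return true;
        }

        return false;
}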