 arch/x86/kernel/kprobes/core.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 28cee019209c..f423b0ef23a7 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1057,9 +1057,10 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
* tailcall optimization. So, to be absolutely safe
* we also save and restore enough stack bytes to cover
* the argument area.
+ * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
+ * raw stack chunk with redzones:
*/
- memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
- MIN_STACK_SIZE(addr));
+ __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
regs->flags &= ~X86_EFLAGS_IF;
trace_hardirqs_off();
regs->ip = (unsigned long)(jp->entry);
@@ -1118,7 +1119,7 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
/* It's OK to start function graph tracing again */
unpause_graph_tracing();
*regs = kcb->jprobe_saved_regs;
- memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
+ __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
preempt_enable_no_resched();
return 1;
}
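
The patch replaces the KASAN-instrumented memcpy() with the raw __memcpy(), which KASAN does not instrument, because the jprobe code deliberately copies a stack region that spans compiler-inserted redzones and the instrumented copy would be reported as a stack out-of-bounds access. Below is a minimal userspace sketch of the same pattern, for illustration only: it assumes gcc or clang with -fsanitize=address, models a redzone with ASan's manual poisoning macros, and uses a hypothetical raw_copy() helper marked no_sanitize_address to stand in for the kernel's uninstrumented __memcpy(). The file name demo.c and the build command are assumptions, not part of the patch.

/*
 * Userspace sketch (not kernel code): model a stack redzone with ASan's
 * manual poisoning API and copy across it with an uninstrumented helper,
 * the way the patched jprobe code uses __memcpy() on a raw stack chunk.
 *
 * Assumed build: gcc -fsanitize=address demo.c -o demo
 */
#include <sanitizer/asan_interface.h>
#include <stdio.h>
#include <string.h>

/*
 * No ASan instrumentation here: the byte accesses are not checked against
 * shadow memory, mirroring the kernel's __memcpy().  The volatile pointers
 * keep the compiler from rewriting the loop into a call to the (intercepted)
 * libc memcpy().
 */
__attribute__((no_sanitize_address))
static void raw_copy(void *dst, const void *src, size_t len)
{
	volatile unsigned char *d = dst;
	const volatile unsigned char *s = src;

	while (len--)
		*d++ = *s++;
}

int main(void)
{
	char stack_chunk[128];	/* stands in for the probed stack area */
	char saved[128];	/* stands in for kcb->jprobes_stack */

	memset(stack_chunk, 0xaa, sizeof(stack_chunk));

	/* Poison the middle of the buffer so it behaves like a redzone. */
	ASAN_POISON_MEMORY_REGION(stack_chunk + 32, 32);

	/*
	 * memcpy(saved, stack_chunk, sizeof(saved)) would trigger an ASan
	 * report here because the copy crosses the poisoned region, much
	 * as KASAN flagged the jprobe stack copy.  The raw, uninstrumented
	 * copy goes through silently.
	 */
	raw_copy(saved, stack_chunk, sizeof(saved));

	/* Unpoison before the buffer goes out of scope. */
	ASAN_UNPOISON_MEMORY_REGION(stack_chunk + 32, 32);

	printf("copied %zu bytes across a poisoned region\n", sizeof(saved));
	return 0;
}

The design choice in the kernel is the same opt-out per call site: memcpy() is the instrumented alias under KASAN, while __memcpy() remains the raw implementation, so code that legitimately reads memory covered by redzones can bypass the shadow checks without disabling KASAN elsewhere.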