author	Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>	2017-10-23 22:07:38 +0530
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-12-10 13:40:43 +0100
commit	1ffabfc1d58ba586fa11cc3cde21d54afec03387 (patch)
tree	e18960f2dd1016105d220e4bdd7cc2f2d4b42098 /arch
parent	f8d07852819a0c2da239fca155342b4f80f4aedf (diff)
powerpc/kprobes: Disable preemption before invoking probe handler for optprobes
commit 8a2d71a3f2737e2448aa68de2b6052cb570d3d2a upstream.

Per Documentation/kprobes.txt, probe handlers need to be invoked with
preemption disabled. Update optimized_callback() to do so. Also move the
get_kprobe_ctlblk() invocation to after preemption is disabled, since it
accesses per-cpu data.

This was not an issue so far, since optprobes was not selected when
CONFIG_PREEMPT was enabled. Commit a30b85df7d599f ("kprobes: Use
synchronize_rcu_tasks() for optprobe with CONFIG_PREEMPT=y") changes this.

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
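To see why the ordering matters: get_kprobe_ctlblk() resolves a pointer into
per-cpu data, so if the task were preempted and migrated to another CPU between
that lookup and the store to kprobe_status, it would write to the wrong CPU's
control block. Below is a minimal, hypothetical sketch of the pattern the patch
enforces (my_ctlblk and handler_pattern are illustrative names, not part of
this patch or the kprobes API):

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical stand-in for the real per-cpu kprobe_ctlblk. */
struct my_ctlblk {
	unsigned long status;
};
static DEFINE_PER_CPU(struct my_ctlblk, my_ctlblk);

static void handler_pattern(void)
{
	struct my_ctlblk *cb;

	preempt_disable();		/* pin the task to this CPU */
	cb = this_cpu_ptr(&my_ctlblk);	/* safe only after preempt_disable():
					 * no migration between lookup and use */
	cb->status = 1;
	/* ... run the probe handler with preemption disabled ... */
	preempt_enable_no_resched();	/* no resched here: in the real caller,
					 * interrupts are still hard-disabled */
}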
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/optprobes.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 91e037ab20a1..60ba7f1370a8 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -115,7 +115,6 @@ static unsigned long can_optimize(struct kprobe *p)
 static void optimized_callback(struct optimized_kprobe *op,
 			       struct pt_regs *regs)
 {
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	unsigned long flags;
 
 	/* This is possible if op is under delayed unoptimizing */
@@ -124,13 +123,14 @@ static void optimized_callback(struct optimized_kprobe *op,
 
 	local_irq_save(flags);
 	hard_irq_disable();
+	preempt_disable();
 
 	if (kprobe_running()) {
 		kprobes_inc_nmissed_count(&op->kp);
 	} else {
 		__this_cpu_write(current_kprobe, &op->kp);
 		regs->nip = (unsigned long)op->kp.addr;
-		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 		opt_pre_handler(&op->kp, regs);
 		__this_cpu_write(current_kprobe, NULL);
 	}
@@ -140,6 +140,7 @@ static void optimized_callback(struct optimized_kprobe *op,
 	 * local_irq_restore() will re-enable interrupts,
 	 * if they were hard disabled.
 	 */
+	preempt_enable_no_resched();
 	local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(optimized_callback);