author		Steven Rostedt (VMware) <rostedt@goodmis.org>	2020-09-29 12:40:31 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2020-10-14 09:48:15 +0200
commit		03170ea7eb98ac6cc334f7225e8f30e5cc7ddf18 (patch)
tree		3c0324201ebc811a42f44ccf91c06a0432086687 /kernel
parent		d6b6cf4d72fe107ae21ddaae1aa5a9b4622efbd5 (diff)
ftrace: Move RCU is watching check after recursion check
commit b40341fad6cc2daa195f8090fd3348f18fff640a upstream.

The first thing that the ftrace function callback helper functions should do is to check for recursion. Peter Zijlstra found that when "rcu_is_watching()" had its notrace removed, it caused perf function tracing to crash. This is because the call to rcu_is_watching() is made before function recursion is checked, and if rcu_is_watching() itself gets traced, it causes an infinite recursion loop.

rcu_is_watching() should still stay notrace, but this should never have been able to crash in the first place: the recursion prevention must be the first thing done in callback functions.

Link: https://lore.kernel.org/r/20200929112541.GM2628@hirez.programming.kicks-ass.net

Cc: stable@vger.kernel.org
Cc: Paul McKenney <paulmck@kernel.org>
Fixes: c68c0fa293417 ("ftrace: Have ftrace_ops_get_func() handle RCU and PER_CPU flags too")
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reported-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
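As a minimal sketch of the ordering this fix enforces: a callback must take its recursion guard before calling anything that might itself be traced. The skeleton below follows the trace recursion helpers of this kernel era (trace_test_and_set_recursion()/trace_clear_recursion()) but is simplified and drops the FTRACE_OPS_FL_PER_CPU handling; it is illustrative, not the exact patched function.

static void example_assist_func(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	int bit;

	/*
	 * Take the recursion guard first. If rcu_is_watching() were called
	 * before this point and happened to be traced, the callback would
	 * re-enter itself endlessly:
	 * callback -> rcu_is_watching() -> callback -> ...
	 */
	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
	if (bit < 0)
		return;

	preempt_disable_notrace();

	/* Only now is it safe to consult state that may involve traced code. */
	if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
		op->func(ip, parent_ip, op, regs);

	preempt_enable_notrace();
	trace_clear_recursion(bit);
}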
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/ftrace.c	8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4d1be82e7011..51e47e18764e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5334,17 +5334,15 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
 {
 	int bit;
 
-	if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
-		return;
-
 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
 	if (bit < 0)
 		return;
 
 	preempt_disable_notrace();
 
-	if (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
-	    !ftrace_function_local_disabled(op)) {
+	if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
+	    (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
+	     !ftrace_function_local_disabled(op))) {
 		op->func(ip, parent_ip, op, regs);
 	}