author     Steven Rostedt <srostedt@redhat.com>    2010-11-10 12:56:12 +0100
committer  Steven Rostedt <rostedt@goodmis.org>    2010-11-12 21:20:08 -0500
commit     91e86e560d0b3ce4c5fc64fd2bbb99f856a30a4e
tree       26d7afb8373474a4d44d0eba4130499676c35bc7
parent     b5908548537ccd3ada258ca5348df7ffc93e5a06
tracing: Fix recursive user stack trace
The user stack trace can fault while examining the user stack. The
fault invokes the do_page_fault handler, which triggers tracing again,
which takes the user stack trace again, which faults and calls
do_page_fault again ...
Thus this causes unbounded recursion. We need a recursion detector
here.
[ Resubmitted by Jiri Olsa ]
[ Eric Dumazet recommended using __this_cpu_* instead of __get_cpu_* ]
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
LKML-Reference: <1289390172-9730-3-git-send-email-jolsa@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
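For context before the diff: the guard added below is an instance of the
standard per-cpu recursion-guard pattern. A minimal sketch in kernel C,
mirroring the patch; the wrapper name guarded_user_stack_trace() is
invented for illustration and is not a function in the patch:

	#include <linux/percpu.h>
	#include <linux/preempt.h>

	static DEFINE_PER_CPU(int, user_stack_count);

	static void guarded_user_stack_trace(void)
	{
		/*
		 * Disable preemption so this task cannot migrate to
		 * another CPU between testing and updating the per-cpu
		 * counter.
		 */
		preempt_disable();

		/*
		 * A nonzero count means this CPU is already inside the
		 * user stack tracer: the trace itself faulted and
		 * re-entered us. Bail out instead of recursing.
		 */
		if (__this_cpu_read(user_stack_count))
			goto out;

		__this_cpu_inc(user_stack_count);

		/*
		 * ... take the user stack trace here; if it faults and
		 * the fault handler traces again, the nested invocation
		 * sees user_stack_count != 0 and returns early ...
		 */

		__this_cpu_dec(user_stack_count);
	out:
		preempt_enable();
	}

Note that the goto out path deliberately skips the decrement: it is
taken only when an outer invocation on the same CPU has already raised
the counter, and that outer invocation performs the decrement itself.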
 kernel/trace/trace.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 82d9b8106cd0..ee6a7339cf0e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1284,6 +1284,8 @@ void trace_dump_stack(void)
 	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
 }
 
+static DEFINE_PER_CPU(int, user_stack_count);
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1302,6 +1304,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (unlikely(in_nmi()))
 		return;
 
+	/*
+	 * prevent recursion, since the user stack tracing may
+	 * trigger other kernel events.
+	 */
+	preempt_disable();
+	if (__this_cpu_read(user_stack_count))
+		goto out;
+
+	__this_cpu_inc(user_stack_count);
+
+
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -1319,6 +1333,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
+
+	__this_cpu_dec(user_stack_count);
+
+ out:
+	preempt_enable();
 }
 
 #ifdef UNUSED
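On the API note in the changelog: Eric Dumazet's suggestion matters
because __this_cpu_inc() lets the architecture update the per-cpu
variable in place (a single segment-prefixed instruction on x86),
whereas the older __get_cpu_var() form first computes the address of
this CPU's copy and then modifies it. An illustrative comparison, not
part of the patch; both forms assume preemption is already disabled:

	/* older style: compute this CPU's instance, then modify it */
	__get_cpu_var(user_stack_count)++;

	/* preferred: the arch can emit a single read-modify-write */
	__this_cpu_inc(user_stack_count);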