author    Changbin Du <changbin.du@gmail.com>    2019-07-30 22:08:50 +0800
committer Steven Rostedt (VMware) <rostedt@goodmis.org>    2019-07-30 21:50:03 -0400
commit    6c77221df96177da0520847ce91e33f539fb8b2d (patch)
tree      83b8c7dae9de0b0e41ee5472b9d0e2a7c646a79d
parent    b1d45c23284e55a379f85554a27a548b7988d47a (diff)
fgraph: Remove redundant ftrace_graph_notrace_addr() test
The ftrace_graph_notrace_addr() check has already been done earlier in trace_graph_entry(), so the second test is redundant and should be removed. With this change, performance should improve slightly.

Link: http://lkml.kernel.org/r/20190730140850.7927-1-changbin.du@gmail.com
Cc: stable@vger.kernel.org
Fixes: 9cd2992f2d6c ("fgraph: Have set_graph_notrace only affect function_graph tracer")
Signed-off-by: Changbin Du <changbin.du@gmail.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
-rw-r--r--  kernel/trace/trace_functions_graph.c  17
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 69ebf3c2f1b5..78af97163147 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -137,6 +137,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
return 0;
+ /*
+ * Do not trace a function if it's filtered by set_graph_notrace.
+ * Make the index of ret stack negative to indicate that it should
+ * ignore further functions. But it needs its own ret stack entry
+ * to recover the original index in order to continue tracing after
+ * returning from the function.
+ */
if (ftrace_graph_notrace_addr(trace->func)) {
trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
/*
@@ -156,16 +163,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
return 0;
/*
- * Do not trace a function if it's filtered by set_graph_notrace.
- * Make the index of ret stack negative to indicate that it should
- * ignore further functions. But it needs its own ret stack entry
- * to recover the original index in order to continue tracing after
- * returning from the function.
- */
- if (ftrace_graph_notrace_addr(trace->func))
- return 1;
-
- /*
* Stop here if tracing_threshold is set. We only write function return
* events to the ring buffer.
*/
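
To make the resulting control flow easier to see outside the kernel tree, below is a minimal, self-contained user-space sketch of what trace_graph_entry() looks like after this patch: the set_graph_notrace filter is consulted exactly once, and the branch that matches both sets the recursion flag and returns 1 so a ret-stack entry is still pushed. The names graph_entry(), notrace_addr() and recursion_flag are hypothetical stand-ins for trace_graph_entry(), ftrace_graph_notrace_addr() and TRACE_GRAPH_NOTRACE_BIT; this is an illustration of the pattern, not the kernel implementation.

/* redundant_check.c - hedged sketch of the post-patch flow, not kernel code */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the TRACE_GRAPH_NOTRACE_BIT recursion flag. */
static bool recursion_flag;

/* Hypothetical stand-in for ftrace_graph_notrace_addr(). */
static bool notrace_addr(unsigned long func)
{
        return func == 0xdeadUL;        /* pretend this address is filtered */
}

/* Simplified shape of trace_graph_entry() after the patch. */
static int graph_entry(unsigned long func)
{
        /* Skip everything while inside a filtered function. */
        if (recursion_flag)
                return 0;

        /*
         * Single test of the notrace filter: set the flag so callees are
         * skipped, and return 1 so a ret-stack entry is still pushed and
         * the flag can be cleared when the function returns.  The second,
         * identical test further down is what the patch removed.
         */
        if (notrace_addr(func)) {
                recursion_flag = true;
                return 1;
        }

        /* ... tracing_threshold check and ring-buffer write would go here ... */
        return 1;
}

int main(void)
{
        printf("traced function       -> %d\n", graph_entry(0x1000UL));
        printf("filtered function     -> %d\n", graph_entry(0xdeadUL));
        printf("callee while filtered -> %d\n", graph_entry(0x2000UL));
        return 0;
}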