summaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorChangbin Du <changbin.du@gmail.com>2019-07-30 22:08:50 +0800
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2019-08-06 19:08:15 +0200
commite4287f51fcb456f6e4f41a8d41073e63d8ea4fdb (patch)
tree26f45dda40ce7338833204f3524c03865b2e6672 /kernel
parent349cd3dcbf9b73fcebe9206db124078188d1b9f5 (diff)
downloadlinux-stable-e4287f51fcb456f6e4f41a8d41073e63d8ea4fdb.tar.gz
linux-stable-e4287f51fcb456f6e4f41a8d41073e63d8ea4fdb.tar.bz2
linux-stable-e4287f51fcb456f6e4f41a8d41073e63d8ea4fdb.zip
fgraph: Remove redundant ftrace_graph_notrace_addr() test
commit 6c77221df96177da0520847ce91e33f539fb8b2d upstream. We have already tested it earlier in this function, so the second test is redundant and should be removed. With this change, performance should improve slightly. Link: http://lkml.kernel.org/r/20190730140850.7927-1-changbin.du@gmail.com Cc: stable@vger.kernel.org Fixes: 9cd2992f2d6c ("fgraph: Have set_graph_notrace only affect function_graph tracer") Signed-off-by: Changbin Du <changbin.du@gmail.com> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/trace/trace_functions_graph.c17
1 file changed, 7 insertions, 10 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 69ebf3c2f1b5..78af97163147 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -137,6 +137,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
return 0;
+ /*
+ * Do not trace a function if it's filtered by set_graph_notrace.
+ * Make the index of ret stack negative to indicate that it should
+ * ignore further functions. But it needs its own ret stack entry
+ * to recover the original index in order to continue tracing after
+ * returning from the function.
+ */
if (ftrace_graph_notrace_addr(trace->func)) {
trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
/*
@@ -156,16 +163,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
return 0;
/*
- * Do not trace a function if it's filtered by set_graph_notrace.
- * Make the index of ret stack negative to indicate that it should
- * ignore further functions. But it needs its own ret stack entry
- * to recover the original index in order to continue tracing after
- * returning from the function.
- */
- if (ftrace_graph_notrace_addr(trace->func))
- return 1;
-
- /*
* Stop here if tracing_threshold is set. We only write function return
* events to the ring buffer.
*/