author		Dmitry Safonov <0x7f454c46@gmail.com>	2015-11-06 22:07:26 +0300
committer	Steven Rostedt <rostedt@goodmis.org>	2015-11-07 13:25:14 -0500
commit		03e88ae6b369da2a26a6e09ad165e57d210789cd (patch)
tree		263c536f2003d6cd3779b85cdf4988ab9b88c0ca	/kernel/trace/trace_functions_graph.c
parent		8b1291994d8e5e621a8af7e165b106e50d04bbf1 (diff)
tracing: Remove unused ftrace_cpu_disabled per cpu variable
Since the ring buffer is lockless, there is no need to disable ftrace on a per-CPU basis, and nothing does so anymore: since commit 68179686ac67cb ("tracing: Remove ftrace_disable/enable_cpu()"), ftrace_cpu_disabled is never changed after initialization. It should not be relied on by any external module either, as it disables only the function and function_graph tracers, not any other tracer.

Link: http://lkml.kernel.org/r/1446836846-22239-1-git-send-email-0x7f454c46@gmail.com

Signed-off-by: Dmitry Safonov <0x7f454c46@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
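
For readers outside the kernel tree, here is a minimal user-space C sketch (not kernel code; sketch_buffer, sketch_reserve and sketch_trace_entry are hypothetical names) of the pattern the commit message relies on: the reserve step itself refuses to hand out a slot when recording is disabled, so an additional per-CPU "disabled" test in every writer, like the one this patch removes, adds nothing.

/*
 * Simplified illustration only: a single-threaded stand-in for the
 * lockless ring buffer.  The reserve call is the single gate; writers
 * do not consult a separate per-CPU flag.
 */
#include <stdatomic.h>
#include <stdio.h>

struct sketch_buffer {
	atomic_int record_disabled;	/* analogous to the ring buffer's own gate */
	size_t     next;
	char       slots[16][64];
};

/* Reserve a slot, or return NULL when recording is off or the buffer is full. */
static char *sketch_reserve(struct sketch_buffer *b)
{
	if (atomic_load(&b->record_disabled))
		return NULL;
	if (b->next >= 16)
		return NULL;
	return b->slots[b->next++];
}

/* Writer in the style of __trace_graph_entry after this patch:
 * attempt the reserve and bail out on failure, nothing else. */
static int sketch_trace_entry(struct sketch_buffer *b, const char *msg)
{
	char *slot = sketch_reserve(b);

	if (!slot)
		return 0;
	snprintf(slot, sizeof(b->slots[0]), "%s", msg);
	return 1;
}

int main(void)
{
	struct sketch_buffer buf = { 0 };

	printf("recorded: %d\n", sketch_trace_entry(&buf, "entry"));	/* prints 1 */
	atomic_store(&buf.record_disabled, 1);
	printf("recorded: %d\n", sketch_trace_entry(&buf, "ignored"));	/* prints 0 */
	return 0;
}

In the kernel path touched below, trace_buffer_lock_reserve() plays the role of the reserve step and returns NULL when recording is disabled, which is why the writers only need their existing !event check once the per-CPU test is gone.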
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--	kernel/trace/trace_functions_graph.c	6
1 file changed, 0 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 92382af7a213..a663cbb84107 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -288,9 +288,6 @@ int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
-		return 0;
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -403,9 +400,6 @@ void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
-		return;
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 					  sizeof(*entry), flags, pc);
 	if (!event)