author     Steven Rostedt (Red Hat) <rostedt@goodmis.org>   2013-07-02 14:48:23 -0400
committer  Steven Rostedt <rostedt@goodmis.org>             2013-07-02 20:42:25 -0400
commit     2a6c24afab70dbcfee49f4c76e1511eec1a3298b (patch)
tree       ea1f139f0f1192f4494a68166eb0ba36322a23de /kernel
parent     8e2e2fa47129532a30cff6c25a47078dc97d9260 (diff)
download   linux-2a6c24afab70dbcfee49f4c76e1511eec1a3298b.tar.gz
           linux-2a6c24afab70dbcfee49f4c76e1511eec1a3298b.tar.bz2
           linux-2a6c24afab70dbcfee49f4c76e1511eec1a3298b.zip
tracing: Fix race between deleting buffer and setting events
While analyzing the code, I discovered that there's a potential race between
deleting a trace instance and setting events. There are a few races that can
occur if events are being traced as the buffer is being deleted. Mostly the
problem comes with freeing the descriptor used by the trace event callback.
To prevent problems like this, the events are disabled before the buffer is
deleted. The problem with the current solution is that the event_mutex is let
go between disabling the events and freeing the files, which means that the
events could be enabled again while the freeing takes place.

Cc: stable@vger.kernel.org # 3.10
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
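The patch below follows the common lock-split pattern: the body of
__ftrace_set_clr_event() moves into a __ftrace_set_clr_event_nolock() helper
that expects the caller to already hold event_mutex, and the original function
becomes a thin wrapper that takes the lock. event_trace_del_tracer() can then
take event_mutex once and keep it held across both disabling the events and
removing the files, closing the window in which the events could be
re-enabled. The following is a minimal user-space sketch of that pattern, not
the kernel code itself; the names (set_clr_event_nolock, delete_instance,
files_present) and the use of pthreads are purely illustrative.

/*
 * Illustrative sketch only -- shows the lock-split pattern: a "_nolock"
 * worker that requires the caller to hold the mutex, a locked wrapper for
 * ordinary callers, and a delete path that holds the mutex across both the
 * disable and the teardown so no re-enable can sneak in between them.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static int events_enabled;
static int files_present = 1;

/* Caller must hold event_mutex. */
static int set_clr_event_nolock(int set)
{
	if (!files_present)
		return -1;	/* files already torn down, refuse */
	events_enabled = set;
	return 0;
}

/* Ordinary entry point: take the lock, then run the worker. */
static int set_clr_event(int set)
{
	int ret;

	pthread_mutex_lock(&event_mutex);
	ret = set_clr_event_nolock(set);
	pthread_mutex_unlock(&event_mutex);
	return ret;
}

/*
 * Delete path: disable events and tear down the files under one critical
 * section.  A concurrent set_clr_event(1) must wait for the mutex and then
 * sees files_present == 0, so it cannot re-enable events against freed state.
 */
static void delete_instance(void)
{
	pthread_mutex_lock(&event_mutex);
	set_clr_event_nolock(0);	/* disable any running events */
	files_present = 0;		/* ...then remove the files */
	pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
	set_clr_event(1);
	delete_instance();
	printf("enable after delete: %d\n", set_clr_event(1));	/* -1 */
	return 0;
}

Splitting out a _nolock variant keeps the locking rules explicit at each call
site, which is why the kernel generally prefers it over recursive locking.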
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace_events.c  23
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 920e08fb53b3..7d854290bf81 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -441,14 +441,14 @@ static int tracing_release_generic_file(struct inode *inode, struct file *filp)
 /*
  * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
  */
-static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
-				  const char *sub, const char *event, int set)
+static int
+__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
+			      const char *sub, const char *event, int set)
 {
 	struct ftrace_event_file *file;
 	struct ftrace_event_call *call;
 	int ret = -EINVAL;
 
-	mutex_lock(&event_mutex);
 	list_for_each_entry(file, &tr->events, list) {
 
 		call = file->event_call;
@@ -474,6 +474,17 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
 
 		ret = 0;
 	}
+
+	return ret;
+}
+
+static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
+				  const char *sub, const char *event, int set)
+{
+	int ret;
+
+	mutex_lock(&event_mutex);
+	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
 	mutex_unlock(&event_mutex);
 
 	return ret;
@@ -2408,11 +2419,11 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
 
 int event_trace_del_tracer(struct trace_array *tr)
 {
-	/* Disable any running events */
-	__ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
-
 	mutex_lock(&event_mutex);
 
+	/* Disable any running events */
+	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
+
 	down_write(&trace_event_sem);
 	__trace_remove_event_dirs(tr);
 	debugfs_remove_recursive(tr->event_dir);