authorPeter Zijlstra <peterz@infradead.org>2022-05-20 20:38:06 +0200
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2022-05-25 09:10:38 +0200
commit6cdd53a49aa7413e53c14ece27d826f0b628b18a (patch)
tree4cf90d31600d640119c5c55cc7d2acd2dbddf2cc /kernel/events
parent5a76a35f999d0936fcfa658b8f09df4508e0543b (diff)
downloadlinux-stable-6cdd53a49aa7413e53c14ece27d826f0b628b18a.tar.gz
linux-stable-6cdd53a49aa7413e53c14ece27d826f0b628b18a.tar.bz2
linux-stable-6cdd53a49aa7413e53c14ece27d826f0b628b18a.zip
perf: Fix sys_perf_event_open() race against self
commit 3ac6487e584a1eb54071dbe1212e05b884136704 upstream.

Norbert reported that it's possible to race sys_perf_event_open() such
that the loser ends up in another context from the group leader,
triggering many WARNs.

The move_group case checks for races against itself, but the
!move_group case doesn't, seemingly relying on the previous
group_leader->ctx == ctx check. However, that check is racy due to not
holding any locks at that time.

Therefore, re-check the result after acquiring locks and bail
if they no longer match.

Additionally, clarify the not_move_group case from the
move_group-vs-move_group race.

Fixes: f63a8daa5812 ("perf: Fix event->ctx locking")
Reported-by: Norbert Slusarek <nslusarek@gmx.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
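For context, the racy ingredient is simply two sys_perf_event_open() calls running
concurrently against the same group leader while one of them can change the leader's
context. The userspace sketch below only illustrates the shape of such concurrent
callers; it is not the reported reproducer, and the helper open_sw_event() is made up
for the example.

/*
 * Illustrative sketch only: two threads call perf_event_open() against the
 * same group leader concurrently. The actual bug additionally requires one
 * caller to take the move_group path while the other relies on the unlocked
 * group_leader->ctx == ctx check.
 */
#include <linux/perf_event.h>
#include <pthread.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int leader_fd;

static int open_sw_event(int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	/* pid = 0: the calling thread, cpu = -1: any CPU, flags = 0 */
	return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

static void *racer(void *arg)
{
	(void)arg;
	/* Try to attach a sibling to the leader, racing the other thread. */
	open_sw_event(leader_fd);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	leader_fd = open_sw_event(-1);	/* group leader on the main thread */

	pthread_create(&t1, NULL, racer, NULL);
	pthread_create(&t2, NULL, racer, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Whether the WARNs actually trigger depends on one caller hitting the move_group path
while the other still trusts the unlocked group_leader->ctx == ctx test; that unlocked
window is exactly what this patch closes.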
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c	14	++++++++++++++
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ec2f9e433208..88dd1398ae88 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10758,6 +10758,9 @@ SYSCALL_DEFINE5(perf_event_open,
 		 * Do not allow to attach to a group in a different task
 		 * or CPU context. If we're moving SW events, we'll fix
 		 * this up later, so allow that.
+		 *
+		 * Racy, not holding group_leader->ctx->mutex, see comment with
+		 * perf_event_ctx_lock().
 		 */
 		if (!move_group && group_leader->ctx != ctx)
 			goto err_context;
@@ -10807,6 +10810,7 @@ SYSCALL_DEFINE5(perf_event_open,
 			} else {
 				perf_event_ctx_unlock(group_leader, gctx);
 				move_group = 0;
+				goto not_move_group;
 			}
 		}
 
@@ -10823,7 +10827,17 @@ SYSCALL_DEFINE5(perf_event_open,
 		}
 	} else {
 		mutex_lock(&ctx->mutex);
+
+		/*
+		 * Now that we hold ctx->lock, (re)validate group_leader->ctx == ctx,
+		 * see the group_leader && !move_group test earlier.
+		 */
+		if (group_leader && group_leader->ctx != ctx) {
+			err = -EINVAL;
+			goto err_locked;
+		}
 	}
+not_move_group:
 
 	if (ctx->task == TASK_TOMBSTONE) {
 		err = -ESRCH;
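The shape of the fix is the usual check / lock / re-check pattern: the unlocked
group_leader->ctx == ctx test is only a fast-path hint and is repeated once ctx->mutex
is held, bailing out to err_locked if a concurrent open has moved the leader in the
meantime. A minimal userspace sketch of that pattern, using stand-in types (struct ctx,
struct object) and a pthread mutex rather than the real kernel structures:

/*
 * Sketch of the check / lock / re-check pattern used by the fix.
 * 'struct ctx' and 'struct object' are illustrative stand-ins, not the
 * actual perf_event_context / perf_event structures.
 */
#include <errno.h>
#include <pthread.h>

struct ctx {
	pthread_mutex_t mutex;
};

struct object {
	struct ctx *ctx;
};

static int attach_to_ctx(struct object *leader, struct ctx *ctx)
{
	/* Unlocked hint: a concurrent caller may still move leader->ctx. */
	if (leader->ctx != ctx)
		return -EINVAL;

	pthread_mutex_lock(&ctx->mutex);

	/* Re-validate: with the mutex held, a mover (which must also take
	 * this mutex) can no longer change leader->ctx under us. */
	if (leader->ctx != ctx) {
		pthread_mutex_unlock(&ctx->mutex);
		return -EINVAL;
	}

	/* ... perform the attach while holding ctx->mutex ... */

	pthread_mutex_unlock(&ctx->mutex);
	return 0;
}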