author	Peter Zijlstra <peterz@infradead.org>	2020-03-05 13:38:51 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2020-08-11 15:33:32 +0200
commit	68a2350376b137f98f41a4f153c416282a16531e (patch)
tree	86d98b23e55d4c4e46c45f1737a985be9b9167b0 /kernel
parent	aabba1b100751ac54da34c313784abb141dbf73b (diff)
perf/core: Fix endless multiplex timer
commit 90c91dfb86d0ff545bd329d3ddd72c147e2ae198 upstream.

Kan and Andi reported that we fail to kill rotation when the flexible
events go empty, but the context does not. XXX moar

Fixes: fd7d55172d1e ("perf/cgroups: Don't rotate events for cgroups unnecessarily")
Reported-by: Andi Kleen <ak@linux.intel.com>
Reported-by: Kan Liang <kan.liang@linux.intel.com>
Tested-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200305123851.GX2596@hirez.programming.kicks-ass.net
Cc: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
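The mechanics of the bug, as a minimal user-space sketch (the struct and
function names below are hypothetical stand-ins, not the actual
kernel/events/core.c code): the multiplex hrtimer keeps re-arming as long
as the context's rotate_necessary flag is set, so any path that
deschedules the last flexible event without clearing the flag leaves the
timer firing forever.

#include <stdbool.h>
#include <stdio.h>

/* Reduced model of a perf event context; only the rotation flag matters. */
struct ctx_model {
	int nr_flexible_active;  /* flexible events currently on the PMU */
	bool rotate_necessary;   /* "keep the multiplex timer armed" */
};

/* Stand-in for the mux hrtimer's re-arm decision. */
static bool timer_rearms(const struct ctx_model *ctx)
{
	return ctx->rotate_necessary;
}

/* Pre-fix behaviour: descheduling the flexible events left the flag alone. */
static void flexible_sched_out_buggy(struct ctx_model *ctx)
{
	ctx->nr_flexible_active = 0;
}

/* Post-fix behaviour: clearing EVENT_FLEXIBLE also clears the flag. */
static void flexible_sched_out_fixed(struct ctx_model *ctx)
{
	ctx->nr_flexible_active = 0;
	ctx->rotate_necessary = false;
}

int main(void)
{
	struct ctx_model ctx = { .nr_flexible_active = 1,
				 .rotate_necessary = true };

	flexible_sched_out_buggy(&ctx);
	printf("buggy: timer re-arms = %d\n", timer_rearms(&ctx)); /* 1 */

	ctx.rotate_necessary = true;
	flexible_sched_out_fixed(&ctx);
	printf("fixed: timer re-arms = %d\n", timer_rearms(&ctx)); /* 0 */
	return 0;
}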
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index aaaf50b25cc9..db1f5aa755f2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2171,6 +2171,7 @@ __perf_remove_from_context(struct perf_event *event,
 
 	if (!ctx->nr_events && ctx->is_active) {
 		ctx->is_active = 0;
+		ctx->rotate_necessary = 0;
 		if (ctx->task) {
 			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
 			cpuctx->task_ctx = NULL;
@@ -3047,12 +3048,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 	if (!ctx->nr_active || !(is_active & EVENT_ALL))
 		return;
 
-	/*
-	 * If we had been multiplexing, no rotations are necessary, now no events
-	 * are active.
-	 */
-	ctx->rotate_necessary = 0;
-
 	perf_pmu_disable(ctx->pmu);
 	if (is_active & EVENT_PINNED) {
 		list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
@@ -3062,6 +3057,13 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 	if (is_active & EVENT_FLEXIBLE) {
 		list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
 			group_sched_out(event, cpuctx, ctx);
+
+		/*
+		 * Since we cleared EVENT_FLEXIBLE, also clear
+		 * rotate_necessary; it will be reset by
+		 * ctx_flexible_sched_in() when needed.
+		 */
+		ctx->rotate_necessary = 0;
 	}
 	perf_pmu_enable(ctx->pmu);
 }
@@ -3800,6 +3802,12 @@ ctx_event_to_rotate(struct perf_event_context *ctx)
 				typeof(*event), group_node);
 	}
 
+	/*
+	 * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in()
+	 * finds there are unschedulable events, it will set it again.
+	 */
+	ctx->rotate_necessary = 0;
+
 	return event;
 }
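For context, both new comments defer to ctx_flexible_sched_in() to raise
the flag again. A reduced sketch of that flip side, under the same
caveat (hypothetical model types and names, not the actual kernel
function): the moment one flexible event fails to get on the PMU,
rotation becomes necessary again, which is what makes it safe for
ctx_sched_out() and ctx_event_to_rotate() to clear the flag.

#include <stdbool.h>
#include <stdio.h>

struct event_model { bool fits_on_pmu; };

struct ctx_model { bool rotate_necessary; };

/* Stand-in for group_sched_in(): 0 on success, nonzero when the PMU is full. */
static int group_sched_in_model(const struct event_model *event)
{
	return event->fits_on_pmu ? 0 : -1;
}

/*
 * Reduced model of the flexible scheduling-in pass: any event that cannot
 * be placed on the PMU re-sets rotate_necessary, re-arming multiplexing.
 */
static void flexible_sched_in_model(struct ctx_model *ctx,
				    const struct event_model *events, int n)
{
	for (int i = 0; i < n; i++) {
		if (group_sched_in_model(&events[i]))
			ctx->rotate_necessary = true;
	}
}

int main(void)
{
	struct event_model events[2] = { { true }, { false } };
	struct ctx_model ctx = { .rotate_necessary = false };

	flexible_sched_in_model(&ctx, events, 2);
	printf("rotate_necessary = %d\n", ctx.rotate_necessary); /* 1 */
	return 0;
}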