author     Mark Rutland <mark.rutland@arm.com>   2015-05-13 17:12:25 +0100
committer  Will Deacon <will.deacon@arm.com>     2015-05-27 16:12:36 +0100
commit     cc88116da0d18b8292f5437dbc0c4683c8a34ac1
tree       532e7eeeed93572614eb41b497373e9a1c07a763
parent     64d0d3943e14653fcfd5f9b3bd585bc77fa053df
arm: perf: treat PMUs as CPU affine
In multi-cluster systems, the PMUs can be different across clusters, and
so our logical PMU may not be able to schedule events on all CPUs.

This patch adds a cpumask to encode which CPUs a PMU driver supports
controlling events for, and limits the driver to scheduling events on
those CPUs, and enabling and disabling the physical PMUs on those CPUs.
The cpumask is built based on the interrupt-affinity property, and in
the absence of such a property a homogeneous system is assumed.

Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
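The heart of the change is a per-PMU supported_cpus mask that every
fast-path operation checks before touching the hardware. A minimal
standalone C sketch of that gating pattern follows; the names here
(fake_pmu, pmu_supports_cpu, pmu_add_event) are hypothetical stand-ins,
not the kernel API, which uses struct arm_pmu and the cpumask_* helpers:

/* Standalone sketch of the CPU-affinity gate this patch introduces.
 * A plain unsigned long stands in for the kernel's struct cpumask. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

struct fake_pmu {
	unsigned long supported_cpus;	/* bit n set => CPU n supported */
};

static bool pmu_supports_cpu(const struct fake_pmu *pmu, int cpu)
{
	return pmu->supported_cpus & (1UL << cpu);
}

/* Mirrors the check added to armpmu_add(): bail out early when
 * running on a CPU outside the PMU's cluster. */
static int pmu_add_event(const struct fake_pmu *pmu, int this_cpu)
{
	if (!pmu_supports_cpu(pmu, this_cpu))
		return -1;	/* the kernel returns -ENOENT here */
	/* ... allocate a counter and start the event ... */
	return 0;
}

int main(void)
{
	struct fake_pmu big = { .supported_cpus = 0xf0 };	/* CPUs 4-7 only */

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: %s\n", cpu,
		       pmu_add_event(&big, cpu) ? "rejected" : "scheduled");
	return 0;
}

On a big.LITTLE system each cluster's PMU would carry its own mask, so
an event rejected by one logical PMU can still be opened on the other.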
Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r--  arch/arm/kernel/perf_event.c | 25
1 file changed, 25 insertions(+), 0 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 4a86a0133ac3..9b536be74f7b 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -11,6 +11,7 @@
  */
 #define pr_fmt(fmt) "hw perfevents: " fmt
 
+#include <linux/cpumask.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
@@ -229,6 +230,10 @@ armpmu_add(struct perf_event *event, int flags)
 	int idx;
 	int err = 0;
 
+	/* An event following a process won't be stopped earlier */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return -ENOENT;
+
 	perf_pmu_disable(event->pmu);
 
 	/* If we don't have a space for the counter then finish early. */
@@ -454,6 +459,17 @@ static int armpmu_event_init(struct perf_event *event)
 	int err = 0;
 	atomic_t *active_events = &armpmu->active_events;
 
+	/*
+	 * Reject CPU-affine events for CPUs that are of a different class to
+	 * that which this PMU handles. Process-following events (where
+	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
+	 * reject them later (in armpmu_add) if they're scheduled on a
+	 * different class of CPU.
+	 */
+	if (event->cpu != -1 &&
+	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
+		return -ENOENT;
+
 	/* does not support taken branch sampling */
 	if (has_branch_stack(event))
 		return -EOPNOTSUPP;
@@ -489,6 +505,10 @@ static void armpmu_enable(struct pmu *pmu)
 	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
 	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 
+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
+
 	if (enabled)
 		armpmu->start(armpmu);
 }
@@ -496,6 +516,11 @@ static void armpmu_enable(struct pmu *pmu)
 
 static void armpmu_disable(struct pmu *pmu)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+
+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
+
 	armpmu->stop(armpmu);
 }
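The supported_cpus mask itself is populated outside this file; the
commit message says it is built from the interrupt-affinity property,
with all CPUs assumed when the property is absent. A hedged standalone
sketch of that fallback logic, assuming the property has already been
parsed into a -1-terminated list of CPU ids (build_supported_mask and
the list layout are hypothetical, not the kernel's parsing code):

/* Sketch: one bit per CPU named by "interrupt-affinity", or all CPUs
 * when the property is absent (homogeneous system assumed). */
#include <stdio.h>

#define NR_CPUS 8

static unsigned long build_supported_mask(const int *affinity)
{
	unsigned long mask = 0;

	if (!affinity)	/* no property: assume a homogeneous system */
		return (1UL << NR_CPUS) - 1;

	for (; *affinity >= 0; affinity++)
		mask |= 1UL << *affinity;
	return mask;
}

int main(void)
{
	const int cluster0[] = { 0, 1, 2, 3, -1 };

	printf("cluster0 mask: 0x%lx\n", build_supported_mask(cluster0));
	printf("homogeneous:   0x%lx\n", build_supported_mask(NULL));
	return 0;
}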