author     Rob Herring (Arm) <robh@kernel.org>    2024-07-31 10:51:24 -0600
committer  Will Deacon <will@kernel.org>          2024-08-16 13:09:12 +0100
commit     d8226d8cfbaf5eb9771af8ad8b4e58697e2ffb74 (patch)
tree       dd53139085c19105c854dcdd188291e4c6baf1d8 /drivers
parent     2f62701fa5b0ee94c68d2fcfc470d08aef195441 (diff)
perf: arm_pmuv3: Add support for Armv9.4 PMU instruction counter
Armv9.4/8.9 PMU adds optional support for a fixed instruction counter,
similar to the fixed cycle counter. Support for the feature is indicated
in the ID_AA64DFR1_EL1 register PMICNTR field. The counter is not
accessible in AArch32.

Existing userspace using direct counter access won't know how to handle
the fixed instruction counter, so we have to avoid using the counter when
user access is requested.

Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
Tested-by: James Clark <james.clark@linaro.org>
Link: https://lore.kernel.org/r/20240731-arm-pmu-3-9-icntr-v3-7-280a8d7ff465@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
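For reference, detecting the feature amounts to reading the PMICNTR field of
ID_AA64DFR1_EL1. A minimal sketch of such a check, assuming the generated
ID_AA64DFR1_EL1_PMICNTR_SHIFT definition and the existing
cpuid_feature_extract_unsigned_field() helper (the in-tree helper introduced
by this series may differ in detail):

	#include <asm/cpufeature.h>
	#include <asm/sysreg.h>

	/* Sketch: true if the PMU implements the fixed instruction counter. */
	static inline bool pmuv3_has_icntr(void)
	{
		u64 dfr1 = read_sysreg(id_aa64dfr1_el1);

		return !!cpuid_feature_extract_unsigned_field(dfr1,
					ID_AA64DFR1_EL1_PMICNTR_SHIFT);
	}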
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/perf/arm_pmuv3.c	25
1 file changed, 25 insertions(+), 0 deletions(-)
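The hunks below route counter reads, writes, filter programming and event
placement through a dedicated index for the fixed instruction counter,
alongside the existing cycle counter index. A sketch of the relevant
constants, which are added earlier in this series in
include/linux/perf/arm_pmuv3.h (names and values shown here are from that
header as assumed, not from this diff):

	/* Sketch: fixed-counter indices sit above the general-purpose counters. */
	#define ARMV8_PMU_MAX_GENERAL_COUNTERS	31
	#define ARMV8_PMU_CYCLE_IDX		31
	#define ARMV8_PMU_INSTR_IDX		32	/* Not accessible from AArch32 */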
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 18046cf4b3a3..4d000532a07f 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -571,6 +571,8 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
 	if (idx == ARMV8_PMU_CYCLE_IDX)
 		value = read_pmccntr();
+	else if (idx == ARMV8_PMU_INSTR_IDX)
+		value = read_pmicntr();
 	else
 		value = armv8pmu_read_hw_counter(event);
@@ -604,6 +606,8 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
 	if (idx == ARMV8_PMU_CYCLE_IDX)
 		write_pmccntr(value);
+	else if (idx == ARMV8_PMU_INSTR_IDX)
+		write_pmicntr(value);
 	else
 		armv8pmu_write_hw_counter(event, value);
 }
@@ -641,6 +645,8 @@ static void armv8pmu_write_event_type(struct perf_event *event)
 	} else {
 		if (idx == ARMV8_PMU_CYCLE_IDX)
 			write_pmccfiltr(hwc->config_base);
+		else if (idx == ARMV8_PMU_INSTR_IDX)
+			write_pmicfiltr(hwc->config_base);
 		else
 			armv8pmu_write_evtype(idx, hwc->config_base);
 	}
@@ -769,6 +775,8 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
 			    ARMPMU_MAX_HWEVENTS) {
 		if (i == ARMV8_PMU_CYCLE_IDX)
 			write_pmccntr(0);
+		else if (i == ARMV8_PMU_INSTR_IDX)
+			write_pmicntr(0);
 		else
 			armv8pmu_write_evcntr(i, 0);
 	}
@@ -937,6 +945,19 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	}
 
 	/*
+	 * Always prefer to place an instruction counter into the instruction counter,
+	 * but don't expose the instruction counter to userspace access as userspace
+	 * may not know how to handle it.
+	 */
+	if ((evtype == ARMV8_PMUV3_PERFCTR_INST_RETIRED) &&
+	    !armv8pmu_event_get_threshold(&event->attr) &&
+	    test_bit(ARMV8_PMU_INSTR_IDX, cpu_pmu->cntr_mask) &&
+	    !armv8pmu_event_want_user_access(event)) {
+		if (!test_and_set_bit(ARMV8_PMU_INSTR_IDX, cpuc->used_mask))
+			return ARMV8_PMU_INSTR_IDX;
+	}
+
+	/*
 	 * Otherwise use events counters
 	 */
 	if (armv8pmu_event_is_chained(event))
@@ -1193,6 +1214,10 @@ static void __armv8pmu_probe_pmu(void *info)
 	/* Add the CPU cycles counter */
 	set_bit(ARMV8_PMU_CYCLE_IDX, cpu_pmu->cntr_mask);
 
+	/* Add the CPU instructions counter */
+	if (pmuv3_has_icntr())
+		set_bit(ARMV8_PMU_INSTR_IDX, cpu_pmu->cntr_mask);
+
 	pmceid[0] = pmceid_raw[0] = read_pmceid0();
 	pmceid[1] = pmceid_raw[1] = read_pmceid1();
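The read_pmicntr()/write_pmicntr() helpers used above are not part of this
diff; they come from the pmuv3 accessor header earlier in the series. A
minimal sketch of what such accessors look like, assuming a SYS_PMICNTR_EL0
system-register definition is available:

	/* Sketch: direct accessors for the fixed instruction counter register. */
	static inline u64 read_pmicntr(void)
	{
		return read_sysreg_s(SYS_PMICNTR_EL0);
	}

	static inline void write_pmicntr(u64 val)
	{
		write_sysreg_s(val, SYS_PMICNTR_EL0);
	}

With the counter wired up this way, counting INST_RETIRED (e.g. via
"perf stat -e instructions") on a PMICNTR-capable CPU should land on the
fixed instruction counter instead of consuming a general-purpose event
counter, unless userspace counter access is requested.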