Diffstat (limited to 'arch/arm64/kernel/perf_event.c')
-rw-r--r--  arch/arm64/kernel/perf_event.c  |  72
1 file changed, 19 insertions(+), 53 deletions(-)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 767c4f6e1f5b..f419a7c075a4 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -20,6 +20,7 @@
*/
#include <asm/irq_regs.h>
+#include <asm/perf_event.h>
#include <asm/virt.h>
#include <linux/of.h>
@@ -384,9 +385,6 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
-#define ARMV8_MAX_COUNTERS 32
-#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)
-
/*
* ARMv8 low level PMU access
*/
@@ -395,40 +393,7 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
* Perf Event to low level counters mapping
*/
#define ARMV8_IDX_TO_COUNTER(x) \
- (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
-
-/*
- * Per-CPU PMCR: config reg
- */
-#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
-#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
-#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
-#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
-#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
-#define ARMV8_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
-#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
-#define ARMV8_PMCR_N_MASK 0x1f
-#define ARMV8_PMCR_MASK 0x7f /* Mask for writable bits */
-
-/*
- * PMOVSR: counters overflow flag status reg
- */
-#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
-#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define ARMV8_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
-#define ARMV8_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
-
-/*
- * Event filters for PMUv3
- */
-#define ARMV8_EXCLUDE_EL1 (1 << 31)
-#define ARMV8_EXCLUDE_EL0 (1 << 30)
-#define ARMV8_INCLUDE_EL2 (1 << 27)
+ (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
static inline u32 armv8pmu_pmcr_read(void)
{
@@ -439,14 +404,14 @@ static inline u32 armv8pmu_pmcr_read(void)
static inline void armv8pmu_pmcr_write(u32 val)
{
- val &= ARMV8_PMCR_MASK;
+ val &= ARMV8_PMU_PMCR_MASK;
isb();
asm volatile("msr pmcr_el0, %0" :: "r" (val));
}
static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
- return pmovsr & ARMV8_OVERFLOWED_MASK;
+ return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}
static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
@@ -512,7 +477,7 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
if (armv8pmu_select_counter(idx) == idx) {
- val &= ARMV8_EVTYPE_MASK;
+ val &= ARMV8_PMU_EVTYPE_MASK;
asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
}
}
@@ -558,7 +523,7 @@ static inline u32 armv8pmu_getreset_flags(void)
asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));
/* Write to clear flags */
- value &= ARMV8_OVSR_MASK;
+ value &= ARMV8_PMU_OVSR_MASK;
asm volatile("msr pmovsclr_el0, %0" :: "r" (value));
return value;
@@ -696,7 +661,7 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
- armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
@@ -707,7 +672,7 @@ static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
- armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
@@ -717,7 +682,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
int idx;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;
+ unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
/* Always place a cycle counter into the cycle counter. */
if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
@@ -754,11 +719,11 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
attr->exclude_kernel != attr->exclude_hv)
return -EINVAL;
if (attr->exclude_user)
- config_base |= ARMV8_EXCLUDE_EL0;
+ config_base |= ARMV8_PMU_EXCLUDE_EL0;
if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
- config_base |= ARMV8_EXCLUDE_EL1;
+ config_base |= ARMV8_PMU_EXCLUDE_EL1;
if (!attr->exclude_hv)
- config_base |= ARMV8_INCLUDE_EL2;
+ config_base |= ARMV8_PMU_INCLUDE_EL2;
/*
* Install the filter into config_base as this is used to
@@ -784,35 +749,36 @@ static void armv8pmu_reset(void *info)
* Initialize & Reset PMNC. Request overflow interrupt for
* 64 bit cycle counter but cheat in armv8pmu_write_counter().
*/
- armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C | ARMV8_PMCR_LC);
+ armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
+ ARMV8_PMU_PMCR_LC);
}
static int armv8_pmuv3_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv8_pmuv3_perf_map,
&armv8_pmuv3_perf_cache_map,
- ARMV8_EVTYPE_EVENT);
+ ARMV8_PMU_EVTYPE_EVENT);
}
static int armv8_a53_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv8_a53_perf_map,
&armv8_a53_perf_cache_map,
- ARMV8_EVTYPE_EVENT);
+ ARMV8_PMU_EVTYPE_EVENT);
}
static int armv8_a57_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv8_a57_perf_map,
&armv8_a57_perf_cache_map,
- ARMV8_EVTYPE_EVENT);
+ ARMV8_PMU_EVTYPE_EVENT);
}
static int armv8_thunder_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv8_thunder_perf_map,
&armv8_thunder_perf_cache_map,
- ARMV8_EVTYPE_EVENT);
+ ARMV8_PMU_EVTYPE_EVENT);
}
static void armv8pmu_read_num_pmnc_events(void *info)
@@ -820,7 +786,7 @@ static void armv8pmu_read_num_pmnc_events(void *info)
int *nb_cnt = info;
/* Read the nb of CNTx counters supported from PMNC */
- *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
+ *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
/* Add the CPU cycles counter */
*nb_cnt += 1;