Diffstat (limited to 'arch/powerpc/perf')
 arch/powerpc/perf/8xx-pmu.c       |   2
 arch/powerpc/perf/Makefile        |   2
 arch/powerpc/perf/callchain.c     |   2
 arch/powerpc/perf/callchain_32.c  |   2
 arch/powerpc/perf/callchain_64.c  |   2
 arch/powerpc/perf/core-book3s.c   |  79
 arch/powerpc/perf/hv-24x7.c       |  18
 arch/powerpc/perf/isa207-common.c |  18
 arch/powerpc/perf/power10-pmu.c   |   3
 arch/powerpc/perf/vpa-pmu.c       | 204
 10 files changed, 279 insertions(+), 53 deletions(-)
diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c
index 308a2e40d7be..1d2972229e3a 100644
--- a/arch/powerpc/perf/8xx-pmu.c
+++ b/arch/powerpc/perf/8xx-pmu.c
@@ -14,7 +14,7 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/inst.h>
#define PERF_8xx_ID_CPU_CYCLES 1
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index 4f53d0b97539..ac2cf58d62db 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -16,6 +16,8 @@ obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o e6500-pmu.o
obj-$(CONFIG_HV_PERF_CTRS) += hv-24x7.o hv-gpci.o hv-common.o
+obj-$(CONFIG_VPA_PMU) += vpa-pmu.o
+
obj-$(CONFIG_PPC_8xx) += 8xx-pmu.o
obj-$(CONFIG_PPC64) += $(obj64-y)
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 6b4434dd0ff3..26aa26482c9a 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -51,7 +51,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
lr = regs->link;
sp = regs->gpr[1];
- perf_callchain_store(entry, perf_instruction_pointer(regs));
+ perf_callchain_store(entry, perf_arch_instruction_pointer(regs));
if (!validate_sp(sp, current))
return;
diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c
index ea8cfe3806dc..ddcc2d8aa64a 100644
--- a/arch/powerpc/perf/callchain_32.c
+++ b/arch/powerpc/perf/callchain_32.c
@@ -139,7 +139,7 @@ void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
long level = 0;
unsigned int __user *fp, *uregs;
- next_ip = perf_instruction_pointer(regs);
+ next_ip = perf_arch_instruction_pointer(regs);
lr = regs->link;
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
index 488e8a21a11e..115d1c105e8a 100644
--- a/arch/powerpc/perf/callchain_64.c
+++ b/arch/powerpc/perf/callchain_64.c
@@ -74,7 +74,7 @@ void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
struct signal_frame_64 __user *sigframe;
unsigned long __user *fp, *uregs;
- next_ip = perf_instruction_pointer(regs);
+ next_ip = perf_arch_instruction_pointer(regs);
lr = regs->link;
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 6b5f8a94e7d8..b906d28f74fd 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -16,7 +16,7 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/hw_irq.h>
#include <asm/interrupt.h>
@@ -132,7 +132,10 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
-static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) {}
+static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
+{
+}
static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */
@@ -266,51 +269,44 @@ static inline u32 perf_flags_from_msr(struct pt_regs *regs)
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
bool use_siar = regs_use_siar(regs);
- unsigned long mmcra = regs->dsisr;
- int marked = mmcra & MMCRA_SAMPLE_ENABLE;
+ unsigned long siar;
+ unsigned long addr;
if (!use_siar)
return perf_flags_from_msr(regs);
/*
- * Check the address in SIAR to identify the
- * privilege levels since the SIER[MSR_HV, MSR_PR]
- * bits are not set for marked events in power10
- * DD1.
- */
- if (marked && (ppmu->flags & PPMU_P10_DD1)) {
- unsigned long siar = mfspr(SPRN_SIAR);
- if (siar) {
- if (is_kernel_addr(siar))
- return PERF_RECORD_MISC_KERNEL;
- return PERF_RECORD_MISC_USER;
- } else {
- if (is_kernel_addr(regs->nip))
- return PERF_RECORD_MISC_KERNEL;
- return PERF_RECORD_MISC_USER;
- }
- }
-
- /*
* If we don't have flags in MMCRA, rather than using
* the MSR, we intuit the flags from the address in
* SIAR which should give slightly more reliable
* results
*/
if (ppmu->flags & PPMU_NO_SIPR) {
- unsigned long siar = mfspr(SPRN_SIAR);
+ siar = mfspr(SPRN_SIAR);
if (is_kernel_addr(siar))
return PERF_RECORD_MISC_KERNEL;
return PERF_RECORD_MISC_USER;
}
/* PR has priority over HV, so order below is important */
- if (regs_sipr(regs))
- return PERF_RECORD_MISC_USER;
-
- if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
+ if (regs_sipr(regs)) {
+ if (!(ppmu->flags & PPMU_P10))
+ return PERF_RECORD_MISC_USER;
+ } else if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
return PERF_RECORD_MISC_HYPERVISOR;
+ /*
+ * Check the address in SIAR to identify the
+ * privilege levels since the SIER[MSR_HV, MSR_PR]
+ * bits are not set correctly in power10 sometimes
+ */
+ if (ppmu->flags & PPMU_P10) {
+ siar = mfspr(SPRN_SIAR);
+ addr = siar ? siar : regs->nip;
+ if (!is_kernel_addr(addr))
+ return PERF_RECORD_MISC_USER;
+ }
+
return PERF_RECORD_MISC_KERNEL;
}
@@ -451,7 +447,8 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
/* Called from ctxsw to prevent one process's branch entries to
* mingle with the other process's entries during context switch.
*/
-static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
+static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
{
if (!ppmu->bhrb_nr)
return;
@@ -2229,6 +2226,10 @@ static struct pmu power_pmu = {
#define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \
PERF_SAMPLE_PHYS_ADDR | \
PERF_SAMPLE_DATA_PAGE_SIZE)
+
+#define SIER_TYPE_SHIFT 15
+#define SIER_TYPE_MASK (0x7ull << SIER_TYPE_SHIFT)
+
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@@ -2298,6 +2299,22 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
record = 0;
/*
+ * SIER[46-48] indicates the instruction type of the sampled instruction.
+ * In ISA v3.0 and earlier, values "0" and "7" are considered reserved.
+ * In ISA v3.1, value "7" is used to indicate "larx/stcx".
+ * Drop the sample if "type" holds a reserved value for this field,
+ * applying an ISA version check.
+ */
+ if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
+ ppmu->get_mem_data_src) {
+ val = (regs->dar & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT;
+ if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31))) {
+ record = 0;
+ atomic64_inc(&event->lost_samples);
+ }
+ }
+
+ /*
* Finally record data if requested.
*/
if (record) {
@@ -2339,7 +2356,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
* Called from generic code to get the misc flags (i.e. processor mode)
* for an event_id.
*/
-unsigned long perf_misc_flags(struct pt_regs *regs)
+unsigned long perf_arch_misc_flags(struct pt_regs *regs)
{
u32 flags = perf_get_misc_flags(regs);
@@ -2353,7 +2370,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
* Called from generic code to get the instruction pointer
* for an event_id.
*/
-unsigned long perf_instruction_pointer(struct pt_regs *regs)
+unsigned long perf_arch_instruction_pointer(struct pt_regs *regs)
{
unsigned long siar = mfspr(SPRN_SIAR);
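
The core-book3s.c changes above do two things: perf_get_misc_flags() folds the old Power10 DD1 special case into a generic PPMU_P10 check, falling back to the address in SIAR (or the NIP) when the SIER[MSR_HV, MSR_PR] bits cannot be trusted, and record_and_restart() now discards samples whose SIER "type" field carries a reserved encoding. A minimal sketch of that type check follows; it is not part of the patch, SIER_TYPE_SHIFT/SIER_TYPE_MASK simply mirror the macros added above, and has_isa_v3_1 stands in for cpu_has_feature(CPU_FTR_ARCH_31).

#include <stdbool.h>

#define SIER_TYPE_SHIFT	15
#define SIER_TYPE_MASK	(0x7ull << SIER_TYPE_SHIFT)

/* Returns true when the sample should be dropped (reserved "type" encoding). */
static inline bool sier_type_is_reserved(unsigned long sier, bool has_isa_v3_1)
{
	unsigned long type = (sier & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT;

	/* 0 is always reserved; 7 is reserved before ISA v3.1 (larx/stcx from v3.1 on) */
	return type == 0 || (type == 7 && !has_isa_v3_1);
}

When this condition holds, the hunk above clears "record" and increments event->lost_samples instead of emitting the sample.
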
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 057ec2e3451d..b0768f3d2893 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -425,16 +425,6 @@ static char *memdup_to_str(char *maybe_str, int max_len, gfp_t gfp)
return kasprintf(gfp, "%.*s", max_len, maybe_str);
}
-static ssize_t device_show_string(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dev_ext_attribute *d;
-
- d = container_of(attr, struct dev_ext_attribute, attr);
-
- return sprintf(buf, "%s\n", (char *)d->var);
-}
-
static ssize_t cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1008,7 +998,7 @@ e_out:
}
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
long hret;
@@ -1118,14 +1108,14 @@ PAGE_0_ATTR(catalog_version, "%lld\n",
(unsigned long long)be64_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
(unsigned long long)be32_to_cpu(page_0->length) * 4096);
-static BIN_ATTR_RO(catalog, 0/* real length varies */);
+static const BIN_ATTR_RO(catalog, 0/* real length varies */);
static DEVICE_ATTR_RO(domains);
static DEVICE_ATTR_RO(sockets);
static DEVICE_ATTR_RO(chipspersocket);
static DEVICE_ATTR_RO(coresperchip);
static DEVICE_ATTR_RO(cpumask);
-static struct bin_attribute *if_bin_attrs[] = {
+static const struct bin_attribute *const if_bin_attrs[] = {
&bin_attr_catalog,
NULL,
};
@@ -1151,7 +1141,7 @@ static struct attribute *if_attrs[] = {
static const struct attribute_group if_group = {
.name = "interface",
- .bin_attrs = if_bin_attrs,
+ .bin_attrs_new = if_bin_attrs,
.attrs = if_attrs,
};
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 56301b2bc8ae..2b3547fdba4a 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -319,10 +319,18 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
return;
}
- sier = mfspr(SPRN_SIER);
+ /*
+ * Use regs->dar for SPRN_SIER, which is saved
+ * by perf_read_regs() at the beginning
+ * of the PMU interrupt handler, to avoid multiple
+ * reads of SPRN_SIER
+ */
+ sier = regs->dar;
val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
- if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31)))
+ if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) {
+ dsrc->val = 0;
return;
+ }
idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;
@@ -338,8 +346,12 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
* to determine the exact instruction type. If the sampling
* criteria is neither load or store, set the type as default
* to NA.
+ *
+ * Use regs->dsisr for MMCRA, which is saved by perf_read_regs()
+ * at the beginning of the PMU interrupt handler, to avoid
+ * multiple reads of SPRN_MMCRA
*/
- mmcra = mfspr(SPRN_MMCRA);
+ mmcra = regs->dsisr;
op_type = (mmcra >> MMCRA_SAMP_ELIG_SHIFT) & MMCRA_SAMP_ELIG_MASK;
switch (op_type) {
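
Both isa207-common.c hunks consume SPR values that were captured once at PMU interrupt time instead of re-reading the registers: per the comments above, perf_read_regs() saves MMCRA into regs->dsisr and, when SIER is available, SIER into regs->dar. A rough sketch of that producer side is below; the helper name is hypothetical, and the body only restates what the comments say perf_read_regs() already does.

#include <asm/ptrace.h>		/* struct pt_regs */
#include <asm/reg.h>		/* mfspr(), SPRN_MMCRA, SPRN_SIER */

/* Hypothetical helper: in the kernel this caching happens in perf_read_regs(). */
static void cache_sampling_sprs(struct pt_regs *regs)
{
	regs->dsisr = mfspr(SPRN_MMCRA);	/* later consumed as "mmcra = regs->dsisr" */
	regs->dar = mfspr(SPRN_SIER);		/* later consumed as "sier = regs->dar" */
}

isa207_get_mem_data_src() then works entirely from the cached values, and on an invalid SIER type it now also clears dsrc->val so no stale data source is reported.
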
diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
index 62a68b6b2d4b..bb57b7cfe640 100644
--- a/arch/powerpc/perf/power10-pmu.c
+++ b/arch/powerpc/perf/power10-pmu.c
@@ -593,7 +593,8 @@ static struct power_pmu power10_pmu = {
.get_mem_weight = isa207_get_mem_weight,
.disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S |
- PPMU_ARCH_31 | PPMU_HAS_ATTR_CONFIG1,
+ PPMU_ARCH_31 | PPMU_HAS_ATTR_CONFIG1 |
+ PPMU_P10,
.n_generic = ARRAY_SIZE(power10_generic_events),
.generic_events = power10_generic_events,
.cache_events = &power10_cache_events,
diff --git a/arch/powerpc/perf/vpa-pmu.c b/arch/powerpc/perf/vpa-pmu.c
new file mode 100644
index 000000000000..840733468959
--- /dev/null
+++ b/arch/powerpc/perf/vpa-pmu.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Performance monitoring support for Virtual Processor Area (VPA) based counters
+ *
+ * Copyright (C) 2024 IBM Corporation
+ */
+#define pr_fmt(fmt) "vpa_pmu: " fmt
+
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s_64.h>
+
+#define MODULE_VERS "1.0"
+#define MODULE_NAME "pseries_vpa_pmu"
+
+#define EVENT(_name, _code) enum{_name = _code}
+
+#define VPA_PMU_EVENT_VAR(_id) event_attr_##_id
+#define VPA_PMU_EVENT_PTR(_id) (&event_attr_##_id.attr.attr)
+
+static ssize_t vpa_pmu_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define VPA_PMU_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR(_name, VPA_PMU_EVENT_VAR(_id), _id, \
+ vpa_pmu_events_sysfs_show)
+
+EVENT(L1_TO_L2_CS_LAT, 0x1);
+EVENT(L2_TO_L1_CS_LAT, 0x2);
+EVENT(L2_RUNTIME_AGG, 0x3);
+
+VPA_PMU_EVENT_ATTR(l1_to_l2_lat, L1_TO_L2_CS_LAT);
+VPA_PMU_EVENT_ATTR(l2_to_l1_lat, L2_TO_L1_CS_LAT);
+VPA_PMU_EVENT_ATTR(l2_runtime_agg, L2_RUNTIME_AGG);
+
+static struct attribute *vpa_pmu_events_attr[] = {
+ VPA_PMU_EVENT_PTR(L1_TO_L2_CS_LAT),
+ VPA_PMU_EVENT_PTR(L2_TO_L1_CS_LAT),
+ VPA_PMU_EVENT_PTR(L2_RUNTIME_AGG),
+ NULL
+};
+
+static const struct attribute_group vpa_pmu_events_group = {
+ .name = "events",
+ .attrs = vpa_pmu_events_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-31");
+static struct attribute *vpa_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group vpa_pmu_format_group = {
+ .name = "format",
+ .attrs = vpa_pmu_format_attr,
+};
+
+static const struct attribute_group *vpa_pmu_attr_groups[] = {
+ &vpa_pmu_events_group,
+ &vpa_pmu_format_group,
+ NULL
+};
+
+static int vpa_pmu_event_init(struct perf_event *event)
+{
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* it does not support event sampling mode */
+ if (is_sampling_event(event))
+ return -EOPNOTSUPP;
+
+ /* no branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ /* Invalid event code */
+ if ((event->attr.config <= 0) || (event->attr.config > 3))
+ return -EINVAL;
+
+ return 0;
+}
+
+static unsigned long get_counter_data(struct perf_event *event)
+{
+ unsigned int config = event->attr.config;
+ u64 data;
+
+ switch (config) {
+ case L1_TO_L2_CS_LAT:
+ if (event->attach_state & PERF_ATTACH_TASK)
+ data = kvmhv_get_l1_to_l2_cs_time_vcpu();
+ else
+ data = kvmhv_get_l1_to_l2_cs_time();
+ break;
+ case L2_TO_L1_CS_LAT:
+ if (event->attach_state & PERF_ATTACH_TASK)
+ data = kvmhv_get_l2_to_l1_cs_time_vcpu();
+ else
+ data = kvmhv_get_l2_to_l1_cs_time();
+ break;
+ case L2_RUNTIME_AGG:
+ if (event->attach_state & PERF_ATTACH_TASK)
+ data = kvmhv_get_l2_runtime_agg_vcpu();
+ else
+ data = kvmhv_get_l2_runtime_agg();
+ break;
+ default:
+ data = 0;
+ break;
+ }
+
+ return data;
+}
+
+static int vpa_pmu_add(struct perf_event *event, int flags)
+{
+ u64 data;
+
+ kvmhv_set_l2_counters_status(smp_processor_id(), true);
+
+ data = get_counter_data(event);
+ local64_set(&event->hw.prev_count, data);
+
+ return 0;
+}
+
+static void vpa_pmu_read(struct perf_event *event)
+{
+ u64 prev_data, new_data, final_data;
+
+ prev_data = local64_read(&event->hw.prev_count);
+ new_data = get_counter_data(event);
+ final_data = new_data - prev_data;
+
+ local64_add(final_data, &event->count);
+}
+
+static void vpa_pmu_del(struct perf_event *event, int flags)
+{
+ vpa_pmu_read(event);
+
+ /*
+ * Disable vpa counter accumulation
+ */
+ kvmhv_set_l2_counters_status(smp_processor_id(), false);
+}
+
+static struct pmu vpa_pmu = {
+ .module = THIS_MODULE,
+ .task_ctx_nr = perf_sw_context,
+ .name = "vpa_pmu",
+ .event_init = vpa_pmu_event_init,
+ .add = vpa_pmu_add,
+ .del = vpa_pmu_del,
+ .read = vpa_pmu_read,
+ .attr_groups = vpa_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+};
+
+static int __init pseries_vpa_pmu_init(void)
+{
+ /*
+ * Of the current Linux on Power platforms, this
+ * driver is supported only on the PowerVM LPAR
+ * (L1) platform.
+ *
+ * Enabled Linux on Power Platforms
+ * ----------------------------------------
+ * [X] PowerVM LPAR (L1)
+ * [ ] KVM Guest On PowerVM KoP(L2)
+ * [ ] Baremetal(PowerNV)
+ * [ ] KVM Guest On PowerNV
+ */
+ if (!firmware_has_feature(FW_FEATURE_LPAR) || is_kvm_guest())
+ return -ENODEV;
+
+ perf_pmu_register(&vpa_pmu, vpa_pmu.name, -1);
+ pr_info("Virtual Processor Area PMU registered.\n");
+
+ return 0;
+}
+
+static void __exit pseries_vpa_pmu_cleanup(void)
+{
+ perf_pmu_unregister(&vpa_pmu);
+ pr_info("Virtual Processor Area PMU unregistered.\n");
+}
+
+module_init(pseries_vpa_pmu_init);
+module_exit(pseries_vpa_pmu_cleanup);
+MODULE_DESCRIPTION("Perf Driver for pSeries VPA pmu counter");
+MODULE_AUTHOR("Kajol Jain <kjain@linux.ibm.com>");
+MODULE_AUTHOR("Madhavan Srinivasan <maddy@linux.ibm.com>");
+MODULE_LICENSE("GPL");
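
For reference, here is a hypothetical user-space consumer of the new vpa_pmu events; it is not part of the patch. It resolves the PMU's dynamic type id from sysfs and counts l2_runtime_agg (event=0x3) on CPU 0 via perf_event_open(). With the module loaded, the same counters should also be reachable through the perf tool, e.g. something like "perf stat -e vpa_pmu/l2_runtime_agg/ -a sleep 1".

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int type, fd;
	FILE *f;

	/* Dynamic PMUs expose their type id in sysfs. */
	f = fopen("/sys/bus/event_source/devices/vpa_pmu/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x3;	/* l2_runtime_agg, from the "events" group above */

	/* pid = -1, cpu = 0: count for any task on CPU 0. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) != sizeof(count))
		return 1;
	printf("l2_runtime_agg: %llu\n", count);
	return 0;
}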