path: root/arch/x86/events/amd
author     Ravi Bangoria <ravi.bangoria@amd.com>   2022-05-09 10:19:09 +0530
committer  Peter Zijlstra <peterz@infradead.org>   2022-05-11 16:27:10 +0200
commit     ba5d35b442c65f32d38ef61f732218274c6dcf4c (patch)
tree       70a044c54e84ef07c4283f872e5e0e35934311e4 /arch/x86/events/amd
parent     2a7a7e658682bfd7501dc6b4c9d365aa6c79788a (diff)
download   linux-stable-ba5d35b442c65f32d38ef61f732218274c6dcf4c.tar.gz
           linux-stable-ba5d35b442c65f32d38ef61f732218274c6dcf4c.tar.bz2
           linux-stable-ba5d35b442c65f32d38ef61f732218274c6dcf4c.zip
perf/amd/ibs: Add support for L3 miss filtering
IBS L3 miss filtering works by tagging an instruction on IBS counter overflow and generating an NMI if the tagged instruction causes an L3 miss. Samples without an L3 miss are discarded and the counter is reset with a random value (between 1-15 for the fetch PMU and 1-127 for the op PMU). This helps reduce sampling overhead when the user is interested only in such samples. One use case for such filtered samples is feeding data to a page-migration daemon in tiered-memory systems.

Add support for L3 miss filtering in the IBS driver via a new PMU attribute "l3missonly". Example usage:

  # perf record -a -e ibs_op/l3missonly=1/ --raw-samples sleep 5

Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220509044914.1473-4-ravi.bangoria@amd.com
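For reference, a minimal userspace sketch of how the new attribute could be set programmatically through perf_event_open(2), rather than via the perf tool. Only the config:16 bit position for ibs_op comes from this patch (op_l3missonly format attribute); the sysfs path, sample period, and sample flags below are illustrative assumptions:

  /*
   * Sketch only: open an ibs_op sampling event with l3missonly set.
   * config bit 16 = l3missonly per the op_l3missonly format attr below.
   */
  #include <stdio.h>
  #include <stdint.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  int main(void)
  {
          struct perf_event_attr attr = { 0 };
          FILE *f;
          int type, fd;

          /* Dynamic PMU type for IBS op, exported by the kernel in sysfs. */
          f = fopen("/sys/bus/event_source/devices/ibs_op/type", "r");
          if (!f || fscanf(f, "%d", &type) != 1) {
                  perror("ibs_op type");
                  return 1;
          }
          fclose(f);

          attr.size = sizeof(attr);
          attr.type = type;
          attr.config = 1ULL << 16;          /* l3missonly (config:16 for ibs_op) */
          attr.sample_period = 100000;       /* example period; low 4 bits must be 0 */
          attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_RAW;

          /* Measure the calling thread on any CPU. */
          fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
          if (fd < 0) {
                  perror("perf_event_open");
                  return 1;
          }
          /* ... read samples from the mmap'ed ring buffer as usual ... */
          close(fd);
          return 0;
  }

The equivalent for ibs_fetch would set config bit 59 instead, per the fetch_l3missonly format attribute added below.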
Diffstat (limited to 'arch/x86/events/amd')
-rw-r--r--  arch/x86/events/amd/ibs.c | 67
1 file changed, 60 insertions(+), 7 deletions(-)
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index ece4f6a7d24b..2dc8b7ec030a 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -544,22 +544,46 @@ static const struct attribute_group *empty_attr_groups[] = {
PMU_FORMAT_ATTR(rand_en, "config:57");
PMU_FORMAT_ATTR(cnt_ctl, "config:19");
+PMU_EVENT_ATTR_STRING(l3missonly, fetch_l3missonly, "config:59");
+PMU_EVENT_ATTR_STRING(l3missonly, op_l3missonly, "config:16");
+
+static umode_t
+zen4_ibs_extensions_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+ return ibs_caps & IBS_CAPS_ZEN4 ? attr->mode : 0;
+}
static struct attribute *rand_en_attrs[] = {
&format_attr_rand_en.attr,
NULL,
};
+static struct attribute *fetch_l3missonly_attrs[] = {
+ &fetch_l3missonly.attr.attr,
+ NULL,
+};
+
static struct attribute_group group_rand_en = {
.name = "format",
.attrs = rand_en_attrs,
};
+static struct attribute_group group_fetch_l3missonly = {
+ .name = "format",
+ .attrs = fetch_l3missonly_attrs,
+ .is_visible = zen4_ibs_extensions_is_visible,
+};
+
static const struct attribute_group *fetch_attr_groups[] = {
&group_rand_en,
NULL,
};
+static const struct attribute_group *fetch_attr_update[] = {
+ &group_fetch_l3missonly,
+ NULL,
+};
+
static umode_t
cnt_ctl_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
@@ -571,14 +595,26 @@ static struct attribute *cnt_ctl_attrs[] = {
NULL,
};
+static struct attribute *op_l3missonly_attrs[] = {
+ &op_l3missonly.attr.attr,
+ NULL,
+};
+
static struct attribute_group group_cnt_ctl = {
.name = "format",
.attrs = cnt_ctl_attrs,
.is_visible = cnt_ctl_is_visible,
};
+static struct attribute_group group_op_l3missonly = {
+ .name = "format",
+ .attrs = op_l3missonly_attrs,
+ .is_visible = zen4_ibs_extensions_is_visible,
+};
+
static const struct attribute_group *op_attr_update[] = {
&group_cnt_ctl,
+ &group_op_l3missonly,
NULL,
};
@@ -805,10 +841,8 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
return ret;
}
-static __init int perf_event_ibs_init(void)
+static __init int perf_ibs_fetch_init(void)
{
- int ret;
-
/*
* Some chips fail to reset the fetch count when it is written; instead
* they need a 0-1 transition of IbsFetchEn.
@@ -819,12 +853,17 @@ static __init int perf_event_ibs_init(void)
if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;
+ if (ibs_caps & IBS_CAPS_ZEN4)
+ perf_ibs_fetch.config_mask |= IBS_FETCH_L3MISSONLY;
+
perf_ibs_fetch.pmu.attr_groups = fetch_attr_groups;
+ perf_ibs_fetch.pmu.attr_update = fetch_attr_update;
- ret = perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
- if (ret)
- return ret;
+ return perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
+}
+static __init int perf_ibs_op_init(void)
+{
if (ibs_caps & IBS_CAPS_OPCNT)
perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
@@ -834,10 +873,24 @@ static __init int perf_event_ibs_init(void)
perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
}
+ if (ibs_caps & IBS_CAPS_ZEN4)
+ perf_ibs_op.config_mask |= IBS_OP_L3MISSONLY;
+
perf_ibs_op.pmu.attr_groups = empty_attr_groups;
perf_ibs_op.pmu.attr_update = op_attr_update;
- ret = perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
+ return perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
+}
+
+static __init int perf_event_ibs_init(void)
+{
+ int ret;
+
+ ret = perf_ibs_fetch_init();
+ if (ret)
+ return ret;
+
+ ret = perf_ibs_op_init();
if (ret)
goto err_op;