author     Stephane Eranian <eranian@google.com>      2010-01-18 10:58:01 +0200
committer  Ingo Molnar <mingo@elte.hu>                2010-01-21 13:40:41 +0100
commit     b27d515a49169e5e2a92d621faac761074a8c5b1 (patch)
tree       cc27b8a7c3b3d07df4a09c0eed9ca7aaf265ead1 /arch/x86
parent     92b6759857ea3ad19bc6871044e373f6251841d3 (diff)
perf: x86: Add support for the ANY bit
Propagate the ANY bit into the fixed counter config for v3 and higher.

Signed-off-by: Stephane Eranian <eranian@google.com>
[a.p.zijlstra@chello.nl: split from larger patch]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4b5430c6.0f975e0a.1bf9.ffff85fe@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
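The ANY (AnyThread) bit makes a counter tally events from every logical thread on the physical core, not just the thread that programmed it; this patch lets that request reach the fixed counters on architectural perfmon v3 and later. As a rough illustration only (not part of the patch), user space could ask for the bit through a raw hardware event whose config sets bit 21. The event code 0x3c (UnHalted Core Cycles) and the assumption that the raw bit survives the kernel's event validation are illustrative, not guaranteed:

/*
 * Hypothetical user-space sketch (not part of this patch): open a raw
 * hardware event with bit 21 (the AnyThread/ANY bit) set in the config.
 * 0x3c is UnHalted Core Cycles, an event that can run on a fixed counter.
 * Whether the kernel accepts the bit this way depends on the PMU version
 * and on event validation; treat this as an illustration of the bit, not
 * a guaranteed ABI.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_RAW;
	attr.config = (1ULL << 21) | 0x3c;	/* ANY bit | unhalted core cycles */

	fd = syscall(__NR_perf_event_open, &attr, 0 /* self */, -1 /* any cpu */,
		     -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... run the workload of interest ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles (any thread on this core): %llu\n",
		       (unsigned long long)count);
	close(fd);
	return 0;
}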
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/perf_event.h  |  1 +
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c   |  7 +++++++
2 files changed, 8 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8d9f8548a870..1380367dabd9 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -19,6 +19,7 @@
 #define MSR_ARCH_PERFMON_EVENTSEL1 0x187
 
 #define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
+#define ARCH_PERFMON_EVENTSEL_ANY (1 << 21)
 #define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
 #define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
 #define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index d616c06e99b4..8c1c07073ccc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1343,6 +1343,13 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
 		bits |= 0x2;
 	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
 		bits |= 0x1;
+
+	/*
+	 * ANY bit is supported in v3 and up
+	 */
+	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
+		bits |= 0x4;
+
 	bits <<= (idx * 4);
 	mask = 0xfULL << (idx * 4);
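For reference, each fixed counter owns a 4-bit control field in MSR_CORE_PERF_FIXED_CTR_CTRL: bit 0 enables ring-0 counting, bit 1 ring-3 counting, bit 2 is the AnyThread bit this hunk starts propagating, and bit 3 enables PMI on overflow. The standalone sketch below (illustrative helper names, not kernel symbols) mirrors the shifting and masking above to show how one counter's nibble is rebuilt without disturbing the others:

/*
 * Standalone sketch of the nibble packing used above; the helper name is
 * illustrative, not a kernel symbol. Each fixed counter gets a 4-bit field
 * in MSR_CORE_PERF_FIXED_CTR_CTRL: 0x1 = count in ring 0, 0x2 = count in
 * ring 3, 0x4 = AnyThread (the bit this patch adds), 0x8 = PMI on overflow.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t fixed_ctrl_update(uint64_t ctrl_val, int idx,
				  int os, int usr, int any)
{
	uint64_t bits = 0x8ULL;		/* enable PMI, as the kernel code does */
	uint64_t mask;

	if (usr)
		bits |= 0x2;
	if (os)
		bits |= 0x1;
	if (any)			/* only meaningful on arch perfmon v3+ */
		bits |= 0x4;

	bits <<= (idx * 4);		/* move into this counter's nibble */
	mask  = 0xfULL << (idx * 4);

	ctrl_val &= ~mask;		/* clear the old nibble ... */
	ctrl_val |= bits;		/* ... and install the new one */
	return ctrl_val;
}

int main(void)
{
	/* Fixed counter 1, counting in both rings, across both SMT threads. */
	uint64_t ctrl = fixed_ctrl_update(0, 1, 1, 1, 1);

	printf("FIXED_CTR_CTRL = 0x%llx\n", (unsigned long long)ctrl);	/* 0xf0 */
	return 0;
}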