author		Eric B Munson <emunson@mgebm.net>	2011-05-23 04:22:40 +0000
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-06-23 15:24:00 -0700
commit		24fb3f4cf3de9955eae325d421047c0f2dd6b48f (patch)
tree		92e42fc66c70281250909a1916fae6dac13b88d3 /arch
parent		c3bf52934198b95f7fb546bd4a544ab38d19be52 (diff)
download	linux-stable-24fb3f4cf3de9955eae325d421047c0f2dd6b48f.tar.gz
		linux-stable-24fb3f4cf3de9955eae325d421047c0f2dd6b48f.tar.bz2
		linux-stable-24fb3f4cf3de9955eae325d421047c0f2dd6b48f.zip
powerpc/oprofile: Handle events that raise an exception without overflowing
commit ad5d5292f16c6c1d7d3e257c4c7407594286b97e upstream.

Commit 0837e3242c73566fc1c0196b4ec61779c25ffc93 fixes a situation on POWER7 where events can roll back if a speculative event doesn't actually complete. This can raise a performance monitor exception. We need to catch this to ensure that we reset the PMC. In all cases the PMC will be less than 256 cycles from overflow.

This patch lifts Anton's fix for the problem in perf and applies it to oprofile as well.

Signed-off-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
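The test the patch adds has two parts: a counter whose sign bit is set has already overflowed, and on POWER7 a counter sitting within 256 counts of the 0x80000000 overflow point is treated as overflowed too, since a rolled-back speculative event can raise the exception from just below the boundary. As a quick illustration of that arithmetic, here is a minimal userspace sketch (plain C, not kernel code; pmc_overflow_demo and the on_power7 flag are hypothetical stand-ins for the patch's pmc_overflow() and its __is_processor(PV_POWER7) check):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Userspace sketch of the overflow test the patch introduces.
 * Classic PMCs are 32-bit counters, so a value with the sign bit
 * set has passed the 0x80000000 overflow point.
 */
static bool pmc_overflow_demo(unsigned long val, bool on_power7)
{
	/* Counter has already wrapped: sign bit set. */
	if ((int32_t)val < 0)
		return true;

	/*
	 * On POWER7, a rolled-back speculative event can raise the
	 * exception with the counter still just short of overflow;
	 * treat anything within 256 counts of the boundary as an
	 * overflow so the counter gets reset.
	 */
	if (on_power7 && (0x80000000UL - val) <= 256)
		return true;

	return false;
}

int main(void)
{
	/* Wrapped counter: caught on any CPU. */
	printf("%d\n", pmc_overflow_demo(0x80000001UL, false)); /* 1 */
	/* Rolled-back POWER7 counter, 128 counts short of overflow. */
	printf("%d\n", pmc_overflow_demo(0x7fffff80UL, true));  /* 1 */
	/* The same value on another CPU is not treated as overflow. */
	printf("%d\n", pmc_overflow_demo(0x7fffff80UL, false)); /* 0 */
	return 0;
}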
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/oprofile/op_model_power4.c | 24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 80774092db77..93636ca48d3d 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra)
 	return is_kernel;
 }
 
+static bool pmc_overflow(unsigned long val)
+{
+	if ((int)val < 0)
+		return true;
+
+	/*
+	 * Events on POWER7 can roll back if a speculative event doesn't
+	 * eventually complete. Unfortunately in some rare cases they will
+	 * raise a performance monitor exception. We need to catch this to
+	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
+	 * cycles from overflow.
+	 *
+	 * We only do this if the first pass fails to find any overflowing
+	 * PMCs because a user might set a period of less than 256 and we
+	 * don't want to mistakenly reset them.
+	 */
+	if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
+		return true;
+
+	return false;
+}
+
 static void power4_handle_interrupt(struct pt_regs *regs,
 				    struct op_counter_config *ctr)
 {
@@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
 
 	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
 		val = classic_ctr_read(i);
-		if (val < 0) {
+		if (pmc_overflow(val)) {
 			if (oprofile_running && ctr[i].enabled) {
 				oprofile_add_ext_sample(pc, regs, i, is_kernel);
 				classic_ctr_write(i, reset_value[i]);
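With this change in place, a classic_ctr_read() result on POWER7 that is 256 or fewer counts below 0x80000000 is handled the same way as a genuine overflow: a sample is recorded via oprofile_add_ext_sample() and the counter is rewritten with reset_value[i], so a rolled-back speculative event can no longer leave a PMC stuck just short of overflow with its exception unserviced.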