author		Linus Torvalds <torvalds@linux-foundation.org>	2012-02-10 09:05:07 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-02-10 09:05:07 -0800
commit		ce2814f227d3adae8456f7cbd0bd5f922fd284f0 (patch)
tree		351a35a80fb62307ea48bc1b74f687d309dbb31a /kernel
parent		1282ab3f8f7bca1d3a7fb701fbe4f0f772e72ea0 (diff)
parent		f39d47ff819ed52a2afbdbecbe35f23f7755f58d (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf: Fix double start/stop in x86_pmu_start()
  perf evsel: Fix an issue where perf report fails to show the proper percentage
  perf tools: Fix prefix matching for kernel maps
  perf tools: Fix perf stack to non executable on x86_64
  perf: Remove deprecated WARN_ON_ONCE()
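The kernel/ portion of this merge (the diff below) gives perf_adjust_period() an extra bool disable argument: a caller that has already stopped the event and disabled the PMU, as perf_adjust_freq_unthr_context() now does, passes false so the helper does not stop/start the event a second time, while the overflow path still passes true. The following stand-alone C sketch only illustrates that calling convention; the names pmu_stop(), pmu_start(), adjust_period() and the two caller functions are invented for this example and are not the kernel's API.

/* Stand-alone sketch of the calling convention introduced below.
 * Everything here is illustrative; none of it is the real kernel API. */
#include <stdbool.h>
#include <stdio.h>

static void pmu_stop(void)  { puts("pmu->stop"); }
static void pmu_start(void) { puts("pmu->start"); }

/* Mirrors the shape of the new perf_adjust_period() signature: only
 * stop/restart the event here when the caller has not already done so. */
static void adjust_period(bool disable)
{
	if (disable)
		pmu_stop();

	/* ... recompute the sampling period here ... */

	if (disable)
		pmu_start();
}

/* Like perf_adjust_freq_unthr_context(): the event is already stopped
 * (and the PMU disabled), so pass false and restart it exactly once. */
static void unthrottle_tick_path(void)
{
	pmu_stop();
	adjust_period(false);	/* must not stop/start again */
	pmu_start();
}

/* Like __perf_event_overflow(): the event is still running, so let
 * the helper do the stop/start itself. */
static void overflow_path(void)
{
	adjust_period(true);
}

int main(void)
{
	unthrottle_tick_path();
	overflow_path();
	return 0;
}

Each path now performs a single stop/start pair; before the change, the unthrottling path effectively did it twice, which is the double start/stop the first commit title refers to.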
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c	19
1 file changed, 14 insertions, 5 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ba36013cfb21..1b5c081d8b9f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2303,7 +2303,7 @@ do { \
 static DEFINE_PER_CPU(int, perf_throttled_count);
 static DEFINE_PER_CPU(u64, perf_throttled_seq);
 
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	s64 period, sample_period;
@@ -2322,9 +2322,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 	hwc->sample_period = sample_period;
 
 	if (local64_read(&hwc->period_left) > 8*sample_period) {
-		event->pmu->stop(event, PERF_EF_UPDATE);
+		if (disable)
+			event->pmu->stop(event, PERF_EF_UPDATE);
+
 		local64_set(&hwc->period_left, 0);
-		event->pmu->start(event, PERF_EF_RELOAD);
+
+		if (disable)
+			event->pmu->start(event, PERF_EF_RELOAD);
 	}
 }
 
@@ -2350,6 +2354,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		return;
 
 	raw_spin_lock(&ctx->lock);
+	perf_pmu_disable(ctx->pmu);
 
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -2381,13 +2386,17 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		/*
 		 * restart the event
 		 * reload only if value has changed
+		 * we have stopped the event so tell that
+		 * to perf_adjust_period() to avoid stopping it
+		 * twice.
 		 */
 		if (delta > 0)
-			perf_adjust_period(event, period, delta);
+			perf_adjust_period(event, period, delta, false);
 
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
 	}
 
+	perf_pmu_enable(ctx->pmu);
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -4562,7 +4571,7 @@ static int __perf_event_overflow(struct perf_event *event,
 		hwc->freq_time_stamp = now;
 
 		if (delta > 0 && delta < 2*TICK_NSEC)
-			perf_adjust_period(event, delta, hwc->last_period);
+			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
 	/*