author     Ingo Molnar <mingo@elte.hu>  2012-03-13 16:32:54 +0100
committer  Ingo Molnar <mingo@elte.hu>  2012-03-13 16:33:03 +0100
commit     ef15eda98217f5183f457e7a2de8b79555ef908b (patch)
tree       f8f22b48f7bb237c9aa6646175f3e17eeac4af0e /kernel/events
parent     5cb4ac3a583d4ee18c8682ab857e093c4a0d0895 (diff)
parent     ef334a20d84f52407a8a2afd02ddeaecbef0ad3d (diff)
Merge branch 'x86/cleanups' into perf/uprobes
Merge reason: We want to merge a dependent patch.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c           | 19
-rw-r--r--  kernel/events/hw_breakpoint.c  |  4
2 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7c3b9de55f6b..94afe5b91c6a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2303,7 +2303,7 @@ do { \
 static DEFINE_PER_CPU(int, perf_throttled_count);
 static DEFINE_PER_CPU(u64, perf_throttled_seq);
 
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	s64 period, sample_period;
@@ -2322,9 +2322,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 	hwc->sample_period = sample_period;
 
 	if (local64_read(&hwc->period_left) > 8*sample_period) {
-		event->pmu->stop(event, PERF_EF_UPDATE);
+		if (disable)
+			event->pmu->stop(event, PERF_EF_UPDATE);
+
 		local64_set(&hwc->period_left, 0);
-		event->pmu->start(event, PERF_EF_RELOAD);
+
+		if (disable)
+			event->pmu->start(event, PERF_EF_RELOAD);
 	}
 }
 
@@ -2350,6 +2354,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		return;
 
 	raw_spin_lock(&ctx->lock);
+	perf_pmu_disable(ctx->pmu);
 
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -2381,13 +2386,17 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		/*
 		 * restart the event
 		 * reload only if value has changed
+		 * we have stopped the event so tell that
+		 * to perf_adjust_period() to avoid stopping it
+		 * twice.
 		 */
 		if (delta > 0)
-			perf_adjust_period(event, period, delta);
+			perf_adjust_period(event, period, delta, false);
 
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
 	}
 
+	perf_pmu_enable(ctx->pmu);
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -4567,7 +4576,7 @@ static int __perf_event_overflow(struct perf_event *event,
 		hwc->freq_time_stamp = now;
 
 		if (delta > 0 && delta < 2*TICK_NSEC)
-			perf_adjust_period(event, delta, hwc->last_period);
+			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
 	/*
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index b0309f76d777..3330022a7ac1 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -658,10 +658,10 @@ int __init init_hw_breakpoint(void)
 
  err_alloc:
 	for_each_possible_cpu(err_cpu) {
-		if (err_cpu == cpu)
-			break;
 		for (i = 0; i < TYPE_MAX; i++)
 			kfree(per_cpu(nr_task_bp_pinned[i], cpu));
+		if (err_cpu == cpu)
+			break;
 	}
 
 	return -ENOMEM;
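
The core.c hunks above resolve a double pmu->stop()/pmu->start(): perf_adjust_freq_unthr_context() already stops the event before adjusting it, while __perf_event_overflow() calls perf_adjust_period() with the event still running. The new 'disable' argument tells perf_adjust_period() whether it owns the stop/start transition. Below is a minimal, self-contained sketch of that calling convention; struct event, pmu_stop(), pmu_start() and adjust_period() are hypothetical stand-ins for illustration, not the kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    struct event { int running; };  /* stand-in for struct perf_event */

    static void pmu_stop(struct event *e)  { e->running = 0; puts("pmu stop");  }
    static void pmu_start(struct event *e) { e->running = 1; puts("pmu start"); }

    /* 'disable' says whether this function must stop/start the event
     * itself; a caller that already stopped the event passes false. */
    static void adjust_period(struct event *e, bool disable)
    {
        if (disable)
            pmu_stop(e);
        /* ... recompute the sample period, clear period_left ... */
        if (disable)
            pmu_start(e);
    }

    int main(void)
    {
        struct event e = { .running = 1 };

        /* tick path (cf. perf_adjust_freq_unthr_context): the caller
         * stops and restarts the event itself, so it passes false. */
        pmu_stop(&e);
        adjust_period(&e, false);
        pmu_start(&e);

        /* overflow path (cf. __perf_event_overflow): the event is
         * still running, so the callee does the stop/start. */
        adjust_period(&e, true);
        return 0;
    }

The perf_pmu_disable()/perf_pmu_enable() pair added around the event loop serves the same goal at a coarser granularity: it batches all of the per-event reprogramming into a single PMU disable/enable transition rather than one per event.

The hw_breakpoint.c hunk reorders an error-path cleanup loop: the old code broke out of the loop before freeing anything for the failing CPU, leaking whatever that CPU had already allocated when the failure hit mid-iteration. A sketch of the corrected ordering, using plain arrays in place of the kernel's per-cpu variables (free(NULL) is a no-op, so unallocated slots are safe to pass):

    #include <stdlib.h>

    #define NCPUS    4
    #define TYPE_MAX 2

    int main(void)
    {
        void *buf[NCPUS][TYPE_MAX] = { { 0 } };
        int cpu, i, err_cpu;

        for (cpu = 0; cpu < NCPUS; cpu++)
            for (i = 0; i < TYPE_MAX; i++)
                if (!(buf[cpu][i] = malloc(16)))
                    goto err_alloc;
        return 0;

    err_alloc:
        for (err_cpu = 0; err_cpu < NCPUS; err_cpu++) {
            /* free first, then test: the failing CPU's partial
             * allocations must be released before breaking out */
            for (i = 0; i < TYPE_MAX; i++)
                free(buf[err_cpu][i]);
            if (err_cpu == cpu)
                break;
        }
        return EXIT_FAILURE;
    }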