| author | Ingo Molnar <mingo@elte.hu> | 2009-05-25 21:41:28 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-05-25 21:41:28 +0200 |
| commit | 53b441a565bf4036ab49c8ea04c5ad06ace7dd6b (patch) | |
| tree | df09d4ee33ce4d6569dd4d5646a88e40ff6a138c /arch | |
| parent | a78ac3258782f3e64cb40beb5990808e1febcc0c (diff) | |
Revert "perf_counter, x86: speed up the scheduling fast-path"
This reverts commit b68f1d2e7aa21029d73c7d453a8046e95d351740.
It is causing problems (stuck/stuttering profiling) when mixed
NMI and non-NMI counters are used.
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090525153931.703093461@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
| -rw-r--r-- | arch/x86/kernel/cpu/perf_counter.c | 5 |

1 file changed, 3 insertions(+), 2 deletions(-)
```diff
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index c4b543d1a86f..189bf9d7cdab 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -293,7 +293,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 			return -EACCES;
 		hwc->nmi = 1;
 	}
-	perf_counters_lapic_init(hwc->nmi);
 
 	if (!hwc->irq_period)
 		hwc->irq_period = x86_pmu.max_period;
@@ -612,6 +611,8 @@ try_generic:
 		hwc->counter_base = x86_pmu.perfctr;
 	}
 
+	perf_counters_lapic_init(hwc->nmi);
+
 	x86_pmu.disable(hwc, idx);
 
 	cpuc->counters[idx] = counter;
@@ -1037,7 +1038,7 @@ void __init init_hw_perf_counters(void)
 
 	pr_info("... counter mask: %016Lx\n", perf_counter_mask);
 
-	perf_counters_lapic_init(1);
+	perf_counters_lapic_init(0);
 	register_die_notifier(&perf_counter_nmi_notifier);
 }
```
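To illustrate why the reverted optimisation misbehaves with mixed NMI and non-NMI counters, here is a minimal user-space sketch, not kernel code. Only perf_counters_lapic_init() corresponds to the function touched by this revert; lapic_nmi_mode, struct hw_counter, schedule_counter_fast() and schedule_counter_safe() are illustrative assumptions. The "fast" path stands for the reverted behaviour (program the LAPIC once, up front), the "safe" path for the behaviour restored by this commit (reprogram it each time a counter is enabled on a hardware slot).

```c
/*
 * Sketch only: simulates one shared per-CPU interrupt-delivery mode
 * being contended by counters that want different flavours (NMI vs.
 * regular interrupt).  Not kernel code; names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static bool lapic_nmi_mode;		/* stand-in for the per-CPU LVTPC setting */

static void perf_counters_lapic_init_sim(bool nmi)
{
	lapic_nmi_mode = nmi;		/* stand-in for programming the LAPIC */
}

struct hw_counter {
	const char *name;
	bool nmi;			/* does this counter want NMI delivery? */
};

/* Reverted optimisation: rely on whatever mode was programmed at init time. */
static void schedule_counter_fast(const struct hw_counter *c)
{
	printf("%-12s wants nmi=%d, LAPIC delivers nmi=%d -> %s\n",
	       c->name, (int)c->nmi, (int)lapic_nmi_mode,
	       c->nmi == lapic_nmi_mode ? "ok" : "WRONG (stuck profiling)");
}

/* Behaviour restored by this revert: reprogram on every counter enable. */
static void schedule_counter_safe(const struct hw_counter *c)
{
	perf_counters_lapic_init_sim(c->nmi);
	printf("%-12s wants nmi=%d, LAPIC delivers nmi=%d -> ok\n",
	       c->name, (int)c->nmi, (int)lapic_nmi_mode);
}

int main(void)
{
	struct hw_counter nmi_ctr = { "nmi-counter", true  };
	struct hw_counter irq_ctr = { "irq-counter", false };

	puts("init-time-only LAPIC setup (reverted optimisation):");
	perf_counters_lapic_init_sim(true);	/* one-shot init, NMI mode */
	schedule_counter_fast(&nmi_ctr);
	schedule_counter_fast(&irq_ctr);	/* mismatch once counters are mixed */

	puts("\nper-enable LAPIC setup (behaviour restored by this revert):");
	schedule_counter_safe(&nmi_ctr);
	schedule_counter_safe(&irq_ctr);
	return 0;
}
```

With two counters that disagree on hwc->nmi, a one-shot setup necessarily leaves one of them expecting the wrong interrupt flavour, which matches the stuck/stuttering profiling described in the changelog; reprogramming at enable time trades a little scheduling-path overhead for correctness.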