author    | Peter Zijlstra <peterz@infradead.org> | 2015-05-22 11:36:13 +0200
committer | Ingo Molnar <mingo@kernel.org>        | 2015-05-27 09:17:46 +0200
commit    | ba040653b48d32afa8b6c3331eae97c6bbb66f03
tree      | a48ee59b5f38a1932f61147dd49c52a6c0f864ca /arch
parent    | 8736e548db0f48244132bc36331496494625bbaf
perf/x86/intel: Simplify put_exclusive_constraints()
Don't bother with taking locks if we're not actually going to do
anything. Also, drop the _irqsave(); this function is only ever called
from IRQ-disabled context.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
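For readers less familiar with the pattern: the message describes two independent simplifications. First, skip the lock entirely when the event was never assigned a counter (hwc->idx < 0), since there is then no counter state to clear. Second, use plain raw_spin_lock() instead of raw_spin_lock_irqsave(), because this path only runs with interrupts already disabled, so saving and restoring the IRQ flags is pure overhead. Below is a minimal before/after sketch of that pattern, not the actual perf code; the names (struct excl_state, put_state_before/after) are hypothetical.

```c
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical state guarded by a raw spinlock; names are illustrative only. */
struct excl_state {
	raw_spinlock_t	lock;
	bool		sched_started;	/* caller already holds the lock */
	int		counter[4];
};

/*
 * Before: the lock is taken unconditionally, and with _irqsave() even
 * though every caller runs in IRQ-disabled context.
 */
static void put_state_before(struct excl_state *s, int idx)
{
	unsigned long flags = 0;	/* init only silences "may be used uninitialized" */

	if (!s->sched_started)
		raw_spin_lock_irqsave(&s->lock, flags);

	if (idx >= 0)			/* may turn out to be a no-op */
		s->counter[idx] = 0;

	if (!s->sched_started)
		raw_spin_unlock_irqrestore(&s->lock, flags);
}

/*
 * After: no lock traffic at all when there is nothing to do, and the
 * plain lock primitive because IRQs are known to be off here.
 */
static void put_state_after(struct excl_state *s, int idx)
{
	if (idx < 0)
		return;

	if (!s->sched_started)
		raw_spin_lock(&s->lock);

	s->counter[idx] = 0;

	if (!s->sched_started)
		raw_spin_unlock(&s->lock);
}
```

In the same spirit, the patch below also moves the xl = &excl_cntrs->states[tid] lookup inside the new hwc->idx >= 0 block, since xl is only needed there.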
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel.c | 29 |
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f3201439031d..74f19d9268bb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2130,7 +2130,6 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
 	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
 	int tid = cpuc->excl_thread_id;
 	struct intel_excl_states *xl;
-	unsigned long flags = 0; /* keep compiler happy */
 
 	/*
 	 * nothing needed if in group validation mode
@@ -2141,7 +2140,6 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
 	if (WARN_ON_ONCE(!excl_cntrs))
 		return;
 
-	xl = &excl_cntrs->states[tid];
 	if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
 		hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
 		if (!--cpuc->n_excl)
@@ -2149,22 +2147,25 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
 	}
 
 	/*
-	 * put_constraint may be called from x86_schedule_events()
-	 * which already has the lock held so here make locking
-	 * conditional
+	 * If event was actually assigned, then mark the counter state as
+	 * unused now.
 	 */
-	if (!xl->sched_started)
-		raw_spin_lock_irqsave(&excl_cntrs->lock, flags);
+	if (hwc->idx >= 0) {
+		xl = &excl_cntrs->states[tid];
+
+		/*
+		 * put_constraint may be called from x86_schedule_events()
+		 * which already has the lock held so here make locking
+		 * conditional.
+		 */
+		if (!xl->sched_started)
+			raw_spin_lock(&excl_cntrs->lock);
 
-	/*
-	 * if event was actually assigned, then mark the
-	 * counter state as unused now
-	 */
-	if (hwc->idx >= 0)
 		xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
 
-	if (!xl->sched_started)
-		raw_spin_unlock_irqrestore(&excl_cntrs->lock, flags);
+		if (!xl->sched_started)
+			raw_spin_unlock(&excl_cntrs->lock);
+	}
 }
 
 static void