author	Andi Kleen <ak@linux.intel.com>	2014-09-22 15:27:06 -0700
committer	Ingo Molnar <mingo@kernel.org>	2014-10-03 06:02:49 +0200
commit	4f971248bc6ad2bb2a89a25a072ebfec5757d298 (patch)
tree	0b3282180645dc7e7c9010e3c55b0721f9350d9c
parent	69e8f5b15ef43da1e177665a3e2063711ee4c9de (diff)
perf/x86/intel/uncore: Fix minor race in box setup
I was looking for the cause of the trinity oops in the uncore driver
(so far I haven't found it). However, I found this tiny race: when a
box is set up, two threads on the same CPU may set it up in parallel
(e.g. with kernel preemption). This could lead to the reference count
being incremented too much. Always recheck that there is no existing
CPU reference inside the lock.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: eranian@google.com
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Link: http://lkml.kernel.org/r/1411424826-15629-1-git-send-email-andi@firstfloor.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
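The fix is an instance of double-checked locking: test the per-CPU
pointer once outside the lock for the fast path, then re-test it under
the lock before bumping the reference count, because another thread may
have completed the setup in between. Below is a minimal userspace
sketch of the same pattern, using a pthread mutex in place of the
kernel's raw spinlock; all names (get_box, cached_box, box_lock,
global_box) are hypothetical and for illustration only, not from the
patch:

#include <pthread.h>
#include <stdatomic.h>

/* Hypothetical stand-ins: global_box plays the role of the box found
 * on pmu->box_list, cached_box the role of *per_cpu_ptr(pmu->box, cpu).
 * cached_box is _Atomic so the unlocked fast-path read is well defined
 * in C11. */
struct box {
	atomic_int refcnt;
};

static struct box global_box;
static struct box *_Atomic cached_box;
static pthread_mutex_t box_lock = PTHREAD_MUTEX_INITIALIZER;

static struct box *get_box(void)
{
	struct box *box = cached_box;

	if (box)			/* fast path: already set up */
		return box;

	pthread_mutex_lock(&box_lock);
	/* Recheck under the lock: another thread may have completed the
	 * setup between the unlocked check above and taking the lock.
	 * Without this recheck, both threads would bump refcnt, leaving
	 * the count one too high for a single user. */
	if (!cached_box) {
		atomic_fetch_add(&global_box.refcnt, 1);
		cached_box = &global_box;
	}
	pthread_mutex_unlock(&box_lock);

	return cached_box;
}

The upstream patch expresses the recheck with a goto to a label just
before raw_spin_unlock(), keeping the list walk and the refcount bump
on the slow path only.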
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_uncore.c	4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 42d00e53175d..9762dbd9f3f7 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -42,6 +42,9 @@ struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu
 		return box;
 
 	raw_spin_lock(&uncore_box_lock);
+	/* Recheck in lock to handle races. */
+	if (*per_cpu_ptr(pmu->box, cpu))
+		goto out;
 	list_for_each_entry(box, &pmu->box_list, list) {
 		if (box->phys_id == topology_physical_package_id(cpu)) {
 			atomic_inc(&box->refcnt);
@@ -49,6 +52,7 @@ struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu
 			break;
 		}
 	}
+out:
 	raw_spin_unlock(&uncore_box_lock);
 
 	return *per_cpu_ptr(pmu->box, cpu);
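
For reference, this is how uncore_pmu_to_box() reads with the patch
applied. The lines outside the diff context (the local declaration and
the initial per-CPU load) are reconstructed from the surrounding code
and the hunk headers, so treat them as an approximation of the mainline
source rather than a verbatim copy:

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	struct intel_uncore_box *box;

	/* Unlocked fast path: return the box already cached for this CPU. */
	box = *per_cpu_ptr(pmu->box, cpu);
	if (box)
		return box;

	raw_spin_lock(&uncore_box_lock);
	/* Recheck in lock to handle races. */
	if (*per_cpu_ptr(pmu->box, cpu))
		goto out;
	list_for_each_entry(box, &pmu->box_list, list) {
		if (box->phys_id == topology_physical_package_id(cpu)) {
			/* First reference from this CPU: count it and cache it. */
			atomic_inc(&box->refcnt);
			*per_cpu_ptr(pmu->box, cpu) = box;
			break;
		}
	}
out:
	raw_spin_unlock(&uncore_box_lock);

	return *per_cpu_ptr(pmu->box, cpu);
}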