author    Peter Zijlstra <peterz@infradead.org>    2015-05-21 10:57:39 +0200
committer Ingo Molnar <mingo@kernel.org>           2015-05-27 09:17:45 +0200
commit    43ef205bded025432f5eeeb3503c11fe5cd1913e (patch)
tree      5232d5c0ceba464c361860d3d814bcef01e219e1 /arch
parent    1fe684e349e904adeed2883cfdeef259a21c94f4 (diff)
perf/x86/intel: Remove intel_excl_states::init_state
For some obscure reason intel_{start,stop}_scheduling() copy the HT state
to an intermediate array. This would make sense if we ever were to make
changes to it which we'd have to discard.

Except we don't: by the time we call intel_commit_scheduling() we are, as
the name implies, committed to them; we never back out.

A further hint that it's pointless: intel_stop_scheduling() unconditionally
publishes the state.

So the intermediate array is pointless; modify the state in place and kill
the extra array.

Also remove the pointless array initialization: INTEL_EXCL_UNUSED == 0, so
the kzalloc()'d state is already correctly initialized.

Note: all of this is serialized by intel_excl_cntrs::lock.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
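As a sketch of the argument, consider a minimal userspace analogue (all
names below are hypothetical; a pthread mutex stands in for
intel_excl_cntrs::lock). Once there is no abort path, writing the published
state in place under the lock is exactly as safe as staging writes in a
scratch copy:

#include <pthread.h>

#define NR_CNTRS 64	/* stands in for X86_PMC_IDX_MAX */

enum ht_excl_state { HT_UNUSED = 0, HT_SHARED, HT_EXCLUSIVE };

static pthread_mutex_t ht_lock = PTHREAD_MUTEX_INITIALIZER;
static enum ht_excl_state state[NR_CNTRS];	/* the published HT state */

/* Scheduling is bracketed by the lock, so the sibling thread sees either
 * the state before the whole schedule or after it -- never a partial one. */
static void start_scheduling(void)
{
	pthread_mutex_lock(&ht_lock);
}

/* Update in place: there is no rollback, so no scratch copy is needed. */
static void commit_scheduling(int cntr, int exclusive)
{
	state[cntr] = exclusive ? HT_EXCLUSIVE : HT_SHARED;
}

static void stop_scheduling(void)
{
	pthread_mutex_unlock(&ht_lock);
}

Before the patch, start_scheduling() additionally memcpy()'d state[] into
an init_state[] scratch array, commit wrote the scratch, and
stop_scheduling() unconditionally memcpy()'d it back -- a transaction that
could never abort.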
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c        |  1 -
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h        |  1 -
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  | 22 ++--------------------
3 files changed, 2 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4f7001f28936..d275da3d81dd 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -884,7 +884,6 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 	}
 
 	if (!assign || unsched) {
-
 		for (i = 0; i < n; i++) {
 			e = cpuc->event_list[i];
 			/*
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index e5609522255c..89e6cd61e6ae 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -133,7 +133,6 @@ enum intel_excl_state_type {
 };
 
 struct intel_excl_states {
-	enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];
 	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
 	bool sched_started; /* true if scheduling has started */
 };
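For reference, the per-thread state after this hunk reduces to the
published array alone (struct copied from the hunk above; the first comment
is added here for illustration):

struct intel_excl_states {
	/* updated in place by intel_commit_scheduling(), under intel_excl_cntrs::lock */
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};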
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 6a3e794cdc06..f3201439031d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1927,11 +1927,6 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
 	 * makes scheduling appear as a transaction
 	 */
 	raw_spin_lock(&excl_cntrs->lock);
-
-	/*
-	 * Save a copy of our state to work on.
-	 */
-	memcpy(xl->init_state, xl->state, sizeof(xl->init_state));
 }
 
 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
@@ -1955,9 +1950,9 @@ static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
 	lockdep_assert_held(&excl_cntrs->lock);
 
 	if (c->flags & PERF_X86_EVENT_EXCL)
-		xl->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
+		xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
 	else
-		xl->init_state[cntr] = INTEL_EXCL_SHARED;
+		xl->state[cntr] = INTEL_EXCL_SHARED;
 }
 
 static void
@@ -1980,11 +1975,6 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
 	xl = &excl_cntrs->states[tid];
 
-	/*
-	 * Commit the working state.
-	 */
-	memcpy(xl->state, xl->init_state, sizeof(xl->state));
-
 	xl->sched_started = false;
 
 	/*
 	 * release shared state lock (acquired in intel_start_scheduling())
@@ -2519,19 +2509,11 @@ struct intel_shared_regs *allocate_shared_regs(int cpu)
 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
 {
 	struct intel_excl_cntrs *c;
-	int i;
 
 	c = kzalloc_node(sizeof(struct intel_excl_cntrs),
			 GFP_KERNEL, cpu_to_node(cpu));
 	if (c) {
 		raw_spin_lock_init(&c->lock);
-		for (i = 0; i < X86_PMC_IDX_MAX; i++) {
-			c->states[0].state[i] = INTEL_EXCL_UNUSED;
-			c->states[0].init_state[i] = INTEL_EXCL_UNUSED;
-
-			c->states[1].state[i] = INTEL_EXCL_UNUSED;
-			c->states[1].init_state[i] = INTEL_EXCL_UNUSED;
-		}
 		c->core_id = -1;
 	}
 	return c;
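The removed initialization loop was a no-op to begin with: kzalloc_node()
returns zero-filled memory, and INTEL_EXCL_UNUSED == 0 as the changelog
notes. A small userspace sketch of the same argument, with calloc()
standing in for kzalloc_node() and a hypothetical enum mirroring the
kernel's:

#include <assert.h>
#include <stdlib.h>

/* Like INTEL_EXCL_UNUSED, the first enumerator is 0 by the C standard. */
enum excl_state { EXCL_UNUSED = 0, EXCL_SHARED, EXCL_EXCLUSIVE };

int main(void)
{
	/* calloc(), like kzalloc_node(), hands back zeroed memory... */
	enum excl_state *state = calloc(64, sizeof(*state));

	if (!state)
		return 1;

	/* ...so every slot already reads as EXCL_UNUSED, no loop required. */
	for (int i = 0; i < 64; i++)
		assert(state[i] == EXCL_UNUSED);

	free(state);
	return 0;
}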