author     Sasha Levin <sashal@kernel.org>                     2020-10-12 23:28:52 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>     2020-10-29 10:11:03 +0100
commit     7748425f370fbd14e17390155b3eaff4c7018366 (patch)
tree       7dd3ae4b2f2043538a3aa00be7e1290bd6b675cb /arch
parent     88acb494448a42662aadf0a83761ac07d1e359aa (diff)
perf/x86: Fix n_pair for cancelled txn
[ Upstream commit 871a93b0aad65a7f44ee25f2d17932ef6d559850 ]
Kan reported that n_metric gets corrupted for cancelled transactions;
a similar issue exists in n_pair for AMD's Large Increment per Cycle feature.
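
To make the corruption concrete, here is a minimal user-space model of the
pre-patch bookkeeping (a hypothetical illustration only: the field names
mirror struct cpu_hw_events in arch/x86/events/perf_event.h, everything
else is simplified):

#include <assert.h>
#include <stdio.h>

/* Model of the pre-patch bookkeeping; not kernel code. */
struct cpu_hw_events_model {
	int n_events;	/* # of events in the event list */
	int n_added;	/* # of events added since the PMU was last enabled */
	int n_txn;	/* # of events added in the current transaction */
	int n_pair;	/* # of events occupying a Large Increment counter pair */
};

static void start_txn(struct cpu_hw_events_model *c)
{
	c->n_txn = 0;		/* note: n_pair has no per-txn shadow to reset */
}

static void collect_paired_event(struct cpu_hw_events_model *c)
{
	c->n_events++;
	c->n_added++;
	c->n_txn++;
	c->n_pair++;		/* counted, but not attributed to the txn */
}

static void cancel_txn(struct cpu_hw_events_model *c)
{
	c->n_added  -= c->n_txn;
	c->n_events -= c->n_txn;
	/* bug: n_pair is never wound back, so each cancel leaks one count */
}

int main(void)
{
	struct cpu_hw_events_model c = { 0 };

	start_txn(&c);
	collect_paired_event(&c);
	cancel_txn(&c);		/* transaction fails and is cancelled */

	printf("n_events=%d n_pair=%d\n", c.n_events, c.n_pair);
	assert(c.n_events == 0 && c.n_pair == 1);	/* n_pair is now stale */
	return 0;
}

After enough cancelled transactions the stale n_pair makes the scheduler
believe counter pairs are in use that are not, and otherwise-valid event
groups stop scheduling.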
The problem was confirmed, and the fix verified, by Kim using:
sudo perf stat -e "{cycles,cycles,cycles,cycles}:D" -a sleep 10 &
# should succeed:
sudo perf stat -e "{fp_ret_sse_avx_ops.all}:D" -a workload
# should fail:
sudo perf stat -e "{fp_ret_sse_avx_ops.all,fp_ret_sse_avx_ops.all,cycles}:D" -a workload
# previously failed, now succeeds with this patch:
sudo perf stat -e "{fp_ret_sse_avx_ops.all}:D" -a workload
Fixes: 5738891229a2 ("perf/x86/amd: Add support for Large Increment per Cycle Events")
Reported-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Kim Phillips <kim.phillips@amd.com>
Link: https://lkml.kernel.org/r/20201005082516.GG2628@hirez.programming.kicks-ass.net
Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/events/core.c        6
-rw-r--r--  arch/x86/events/perf_event.h  1
2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 1cbf57dc2ac8..11bbc6590f90 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1087,8 +1087,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 
 		cpuc->event_list[n] = event;
 		n++;
-		if (is_counter_pair(&event->hw))
+		if (is_counter_pair(&event->hw)) {
 			cpuc->n_pair++;
+			cpuc->n_txn_pair++;
+		}
 	}
 	return n;
 }
@@ -1962,6 +1964,7 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
 
 	perf_pmu_disable(pmu);
 	__this_cpu_write(cpu_hw_events.n_txn, 0);
+	__this_cpu_write(cpu_hw_events.n_txn_pair, 0);
 }
 
 /*
@@ -1987,6 +1990,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
 	 */
 	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
 	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
+	__this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair));
 
 	perf_pmu_enable(pmu);
 }
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 7b68ab5f19e7..0e74235cdac9 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -210,6 +210,7 @@ struct cpu_hw_events {
 					     they've never been enabled yet */
 	int			n_txn;    /* the # last events in the below arrays;
 					     added in the current transaction */
+	int			n_txn_pair;
 	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
 	u64			tags[X86_PMC_IDX_MAX];
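
The shape of the fix follows the existing n_txn pattern: reset the shadow
count in start_txn(), accumulate it in collect_events(), and subtract it in
cancel_txn(); commit_txn() needs no change, because committed events
legitimately keep their pair reservations. In terms of the earlier model
(again an illustrative sketch matching the diff, not the kernel source):

/* Post-patch model: same fields as before plus a per-txn shadow counter. */
struct cpu_hw_events_model {
	int n_events;
	int n_added;
	int n_txn;
	int n_pair;
	int n_txn_pair;		/* new: # of paired events added in this txn */
};

static void start_txn(struct cpu_hw_events_model *c)
{
	c->n_txn = 0;
	c->n_txn_pair = 0;		/* new: reset the shadow count */
}

static void collect_paired_event(struct cpu_hw_events_model *c)
{
	c->n_events++;
	c->n_added++;
	c->n_txn++;
	c->n_pair++;
	c->n_txn_pair++;		/* new: attribute the increment to the txn */
}

static void cancel_txn(struct cpu_hw_events_model *c)
{
	c->n_added  -= c->n_txn;
	c->n_events -= c->n_txn;
	c->n_pair   -= c->n_txn_pair;	/* new: wind n_pair back symmetrically */
}

With these in place, the sequence from the earlier model leaves n_pair at 0
after a cancelled transaction, and the previously failing single-event group
schedules again.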