author		Namhyung Kim <namhyung@kernel.org>	2022-09-08 14:41:02 -0700
committer	Peter Zijlstra <peterz@infradead.org>	2022-09-13 15:03:22 +0200
commit		3749d33e510c3dc695b3a5886b706310890d7ebd (patch)
tree		38264855d5b66b021dc61ae373b61daebd665e17
parent		fae9ebde9696385fa2e993e752cf68d9781f3ea0 (diff)
perf: Use sample_flags for callchain
So that it can call perf_callchain() only if needed.  Historically it used
__PERF_SAMPLE_CALLCHAIN_EARLY but we can do that with sample_flags in the
struct perf_sample_data.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220908214104.3851807-1-namhyung@kernel.org
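In short, a PMU driver that has already resolved the callchain at sample time
publishes it through struct perf_sample_data and sets the matching bit in
sample_flags, so the generic code does not resolve it again. A minimal sketch
of that producer-side pattern follows; the function name pmu_drv_setup_sample
is hypothetical, only the data/flag handling mirrors the hunks below:

	/* Sketch only: a driver-side sample setup path (hypothetical name). */
	static void pmu_drv_setup_sample(struct perf_event *event,
					 struct perf_sample_data *data,
					 struct pt_regs *iregs)
	{
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
			/* Resolve the callchain now, from the interrupt regs. */
			data->callchain = perf_callchain(event, iregs);
			/* Tell perf_prepare_sample() this field is already valid. */
			data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
		}
	}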
-rw-r--r--	arch/x86/events/amd/ibs.c	| 4
-rw-r--r--	arch/x86/events/intel/ds.c	| 8
-rw-r--r--	kernel/events/core.c		| 2
3 files changed, 10 insertions, 4 deletions
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index c251bc44c088..dab094166693 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -798,8 +798,10 @@ fail:
 	 * recorded as part of interrupt regs. Thus we need to use rip from
 	 * interrupt regs while unwinding call stack.
 	 */
-	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
 		data.callchain = perf_callchain(event, iregs);
+		data.sample_flags |= PERF_SAMPLE_CALLCHAIN;
+	}
 
 	throttle = perf_event_overflow(event, &data, &regs);
 out:
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index a5275c235c2a..4ba6ab6d0d92 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1546,8 +1546,10 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event,
 	 * previous PMI context or an (I)RET happened between the record and
 	 * PMI.
 	 */
-	if (sample_type & PERF_SAMPLE_CALLCHAIN)
+	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		data->callchain = perf_callchain(event, iregs);
+		data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
+	}
 
 	/*
 	 * We use the interrupt regs as a base because the PEBS record does not
@@ -1719,8 +1721,10 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
 	 * previous PMI context or an (I)RET happened between the record and
 	 * PMI.
 	 */
-	if (sample_type & PERF_SAMPLE_CALLCHAIN)
+	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		data->callchain = perf_callchain(event, iregs);
+		data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
+	}
 
 	*regs = *iregs;
 	/* The ip in basic is EventingIP */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3e90e454b995..c98ecf3e09ba 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7320,7 +7320,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		int size = 1;
 
-		if (!(sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
+		if (filtered_sample_type & PERF_SAMPLE_CALLCHAIN)
 			data->callchain = perf_callchain(event, regs);
 
 		size += data->callchain->nr;
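For context on the consumer side: filtered_sample_type in perf_prepare_sample()
is assumed here to be the requested sample_type with the bits a PMU driver has
already populated masked out, roughly along these lines (an assumption about
the surrounding code, not part of this patch):

	/*
	 * Assumed definition earlier in perf_prepare_sample(): bits the
	 * driver already filled in (advertised via data->sample_flags) are
	 * cleared, so the corresponding fields are not recomputed below.
	 */
	u64 filtered_sample_type = sample_type & ~data->sample_flags;

With the driver hunks above setting PERF_SAMPLE_CALLCHAIN in data->sample_flags,
the perf_callchain() call in this hunk is skipped and the pre-resolved
data->callchain is used directly.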