author    Linus Torvalds <torvalds@linux-foundation.org>    2016-05-16 14:08:43 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-05-16 14:08:43 -0700
commit    36db171cc733bc7b8c628ef21831467d1919decd (patch)
tree      96c6bda274155d7f4e41b2168edc291216f477ba /tools
parent    3469d261eac65912927dca13ee8f77c744ad7aa2 (diff)
parent    3f56e687a138481894a1088d5aa7d41951bdb020 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
 "Bigger kernel side changes:

   - Add backwards writing capability to the perf ring-buffer code,
     which is preparation for future advanced features like robust
     'overwrite support' and snapshot mode. (Wang Nan)

   - Add pause and resume ioctls for the perf ringbuffer (Wang Nan)

   - x86 Intel cstate code cleanups and reorganization (Thomas Gleixner)

   - x86 Intel uncore and CPU PMU driver updates (Kan Liang, Peter Zijlstra)

   - x86 AUX (Intel PT) related enhancements and updates (Alexander Shishkin)

   - x86 MSR PMU driver enhancements and updates (Huang Rui)

   - ... and lots of other changes spread out over 40+ commits.

  Biggest tooling side changes:

   - 'perf trace' features and enhancements (Arnaldo Carvalho de Melo)

   - BPF tooling updates (Wang Nan)

   - 'perf sched' updates (Jiri Olsa)

   - 'perf probe' updates (Masami Hiramatsu)

   - ... plus 200+ other enhancements, fixes and cleanups to tools/

  The merge commits, the shortlog and the changelogs contain a lot more
  details"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (249 commits)
  perf/core: Disable the event on a truncated AUX record
  perf/x86/intel/pt: Generate PMI in the STOP region as well
  perf buildid-cache: Use lsdir() for looking up buildid caches
  perf symbols: Use lsdir() for the search in kcore cache directory
  perf tools: Use SBUILD_ID_SIZE where applicable
  perf tools: Fix lsdir to set errno correctly
  perf trace: Move seccomp args beautifiers to tools/perf/trace/beauty/
  perf trace: Move flock op beautifier to tools/perf/trace/beauty/
  perf build: Add build-test for debug-frame on arm/arm64
  perf build: Add build-test for libunwind cross-platforms support
  perf script: Fix export of callchains with recursion in db-export
  perf script: Fix callchain addresses in db-export
  perf script: Fix symbol insertion behavior in db-export
  perf symbols: Add dso__insert_symbol function
  perf scripting python: Use Py_FatalError instead of die()
  perf tools: Remove xrealloc and ALLOC_GROW
  perf help: Do not use ALLOC_GROW in add_cmd_list
  perf pmu: Make pmu_formats_string to check return value of strbuf
  perf header: Make topology checkers to check return value of strbuf
  perf tools: Make alias handler to check return value of strbuf
  ...
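A rough sketch of how the new ring-buffer pause/resume capability could be used by a consumer; the helper below is illustrative only and assumes the PERF_EVENT_IOC_PAUSE_OUTPUT ioctl this series introduces on the kernel side, guarded in case the installed headers do not yet define it:

#include <linux/perf_event.h>
#include <linux/ioctl.h>
#include <sys/ioctl.h>

#ifndef PERF_EVENT_IOC_PAUSE_OUTPUT
#define PERF_EVENT_IOC_PAUSE_OUTPUT	_IOW('$', 9, __u32)
#endif

/* Illustrative helper: pause (1) or resume (0) writes into the mmap'ed
 * ring buffer of the event behind perf_fd. */
static int perf_ringbuf_pause(int perf_fd, int pause)
{
	return ioctl(perf_fd, PERF_EVENT_IOC_PAUSE_OUTPUT, pause ? 1 : 0);
}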
Diffstat (limited to 'tools')
-rw-r--r--  tools/Makefile | 3
-rw-r--r--  tools/build/Makefile.feature | 8
-rw-r--r--  tools/build/feature/Makefile | 23
-rw-r--r--  tools/build/feature/test-bpf.c | 3
-rw-r--r--  tools/build/feature/test-libunwind-aarch64.c | 26
-rw-r--r--  tools/build/feature/test-libunwind-arm.c | 27
-rw-r--r--  tools/build/feature/test-libunwind-debug-frame-aarch64.c | 16
-rw-r--r--  tools/build/feature/test-libunwind-debug-frame-arm.c | 16
-rw-r--r--  tools/build/feature/test-libunwind-x86.c | 27
-rw-r--r--  tools/build/feature/test-libunwind-x86_64.c | 27
-rw-r--r--  tools/lib/api/fs/fs.c | 13
-rw-r--r--  tools/lib/api/fs/fs.h | 2
-rw-r--r--  tools/perf/Documentation/intel-pt.txt | 7
-rw-r--r--  tools/perf/Documentation/itrace.txt | 8
-rw-r--r--  tools/perf/Documentation/perf-annotate.txt | 2
-rw-r--r--  tools/perf/Documentation/perf-diff.txt | 2
-rw-r--r--  tools/perf/Documentation/perf-list.txt | 107
-rw-r--r--  tools/perf/Documentation/perf-mem.txt | 8
-rw-r--r--  tools/perf/Documentation/perf-record.txt | 13
-rw-r--r--  tools/perf/Documentation/perf-report.txt | 4
-rw-r--r--  tools/perf/Documentation/perf-sched.txt | 16
-rw-r--r--  tools/perf/Documentation/perf-script.txt | 14
-rw-r--r--  tools/perf/Documentation/perf-top.txt | 2
-rw-r--r--  tools/perf/Documentation/perf-trace.txt | 32
-rw-r--r--  tools/perf/Makefile.perf | 15
-rw-r--r--  tools/perf/arch/powerpc/Makefile | 1
-rw-r--r--  tools/perf/arch/powerpc/util/dwarf-regs.c | 40
-rw-r--r--  tools/perf/arch/powerpc/util/sym-handling.c | 43
-rw-r--r--  tools/perf/arch/x86/Makefile | 23
-rw-r--r--  tools/perf/arch/x86/entry/syscalls/syscall_64.tbl | 376
-rwxr-xr-x  tools/perf/arch/x86/entry/syscalls/syscalltbl.sh | 39
-rw-r--r--  tools/perf/arch/x86/tests/perf-time-to-tsc.c | 2
-rw-r--r--  tools/perf/arch/x86/util/intel-bts.c | 5
-rw-r--r--  tools/perf/arch/x86/util/intel-pt.c | 5
-rw-r--r--  tools/perf/arch/x86/util/tsc.c | 32
-rw-r--r--  tools/perf/arch/x86/util/tsc.h | 17
-rw-r--r--  tools/perf/bench/futex-lock-pi.c | 2
-rw-r--r--  tools/perf/bench/futex.h | 6
-rw-r--r--  tools/perf/bench/mem-functions.c | 22
-rw-r--r--  tools/perf/builtin-config.c | 39
-rw-r--r--  tools/perf/builtin-diff.c | 4
-rw-r--r--  tools/perf/builtin-help.c | 18
-rw-r--r--  tools/perf/builtin-inject.c | 1
-rw-r--r--  tools/perf/builtin-kmem.c | 2
-rw-r--r--  tools/perf/builtin-kvm.c | 2
-rw-r--r--  tools/perf/builtin-mem.c | 11
-rw-r--r--  tools/perf/builtin-record.c | 267
-rw-r--r--  tools/perf/builtin-report.c | 19
-rw-r--r--  tools/perf/builtin-sched.c | 198
-rw-r--r--  tools/perf/builtin-script.c | 56
-rw-r--r--  tools/perf/builtin-stat.c | 8
-rw-r--r--  tools/perf/builtin-top.c | 39
-rw-r--r--  tools/perf/builtin-trace.c | 1085
-rw-r--r--  tools/perf/config/Makefile | 5
-rw-r--r--  tools/perf/jvmti/jvmti_agent.c | 43
-rw-r--r--  tools/perf/perf.c | 16
-rw-r--r--  tools/perf/perf.h | 1
-rw-r--r--  tools/perf/scripts/python/export-to-postgresql.py | 52
-rw-r--r--  tools/perf/tests/Build | 2
-rw-r--r--  tools/perf/tests/backward-ring-buffer.c | 151
-rw-r--r--  tools/perf/tests/bpf.c | 2
-rw-r--r--  tools/perf/tests/builtin-test.c | 8
-rw-r--r--  tools/perf/tests/code-reading.c | 2
-rw-r--r--  tools/perf/tests/dso-data.c | 2
-rw-r--r--  tools/perf/tests/event-times.c | 236
-rw-r--r--  tools/perf/tests/event_update.c | 2
-rw-r--r--  tools/perf/tests/hists_common.c | 2
-rw-r--r--  tools/perf/tests/hists_cumulate.c | 4
-rw-r--r--  tools/perf/tests/hists_filter.c | 2
-rw-r--r--  tools/perf/tests/hists_link.c | 4
-rw-r--r--  tools/perf/tests/hists_output.c | 4
-rw-r--r--  tools/perf/tests/keep-tracking.c | 2
-rw-r--r--  tools/perf/tests/openat-syscall-tp-fields.c | 2
-rw-r--r--  tools/perf/tests/perf-record.c | 2
-rw-r--r--  tools/perf/tests/switch-tracking.c | 2
-rw-r--r--  tools/perf/tests/tests.h | 2
-rw-r--r--  tools/perf/tests/vmlinux-kallsyms.c | 11
-rw-r--r--  tools/perf/trace/beauty/eventfd.c | 38
-rw-r--r--  tools/perf/trace/beauty/flock.c | 31
-rw-r--r--  tools/perf/trace/beauty/futex_op.c | 44
-rw-r--r--  tools/perf/trace/beauty/mmap.c | 158
-rw-r--r--  tools/perf/trace/beauty/mode_t.c | 68
-rw-r--r--  tools/perf/trace/beauty/msg_flags.c | 62
-rw-r--r--  tools/perf/trace/beauty/open_flags.c | 56
-rw-r--r--  tools/perf/trace/beauty/perf_event_open.c | 43
-rw-r--r--  tools/perf/trace/beauty/pid.c | 21
-rw-r--r--  tools/perf/trace/beauty/sched_policy.c | 44
-rw-r--r--  tools/perf/trace/beauty/seccomp.c | 52
-rw-r--r--  tools/perf/trace/beauty/signum.c | 53
-rw-r--r--  tools/perf/trace/beauty/socket_type.c | 60
-rw-r--r--  tools/perf/trace/beauty/waitid_options.c | 26
-rw-r--r--  tools/perf/ui/browsers/hists.c | 40
-rw-r--r--  tools/perf/ui/gtk/hists.c | 2
-rw-r--r--  tools/perf/ui/hist.c | 2
-rw-r--r--  tools/perf/ui/stdio/hist.c | 3
-rw-r--r--  tools/perf/util/Build | 8
-rw-r--r--  tools/perf/util/annotate.c | 4
-rw-r--r--  tools/perf/util/auxtrace.c | 7
-rw-r--r--  tools/perf/util/auxtrace.h | 2
-rw-r--r--  tools/perf/util/bpf-loader.c | 143
-rw-r--r--  tools/perf/util/bpf-loader.h | 19
-rw-r--r--  tools/perf/util/build-id.c | 36
-rw-r--r--  tools/perf/util/cache.h | 19
-rw-r--r--  tools/perf/util/call-path.c | 122
-rw-r--r--  tools/perf/util/call-path.h | 77
-rw-r--r--  tools/perf/util/callchain.c | 9
-rw-r--r--  tools/perf/util/callchain.h | 9
-rw-r--r--  tools/perf/util/config.c | 222
-rw-r--r--  tools/perf/util/config.h | 26
-rw-r--r--  tools/perf/util/cpumap.c | 12
-rw-r--r--  tools/perf/util/cpumap.h | 2
-rw-r--r--  tools/perf/util/data.c | 41
-rw-r--r--  tools/perf/util/data.h | 11
-rw-r--r--  tools/perf/util/db-export.c | 89
-rw-r--r--  tools/perf/util/db-export.h | 3
-rw-r--r--  tools/perf/util/dso.c | 4
-rw-r--r--  tools/perf/util/dwarf-aux.c | 52
-rw-r--r--  tools/perf/util/event.c | 1
-rw-r--r--  tools/perf/util/event.h | 9
-rw-r--r--  tools/perf/util/evlist.c | 174
-rw-r--r--  tools/perf/util/evlist.h | 22
-rw-r--r--  tools/perf/util/evsel.c | 139
-rw-r--r--  tools/perf/util/evsel.h | 28
-rw-r--r--  tools/perf/util/evsel_fprintf.c | 212
-rw-r--r--  tools/perf/util/header.c | 33
-rw-r--r--  tools/perf/util/help-unknown-cmd.c | 30
-rw-r--r--  tools/perf/util/hist.c | 23
-rw-r--r--  tools/perf/util/hist.h | 12
-rw-r--r--  tools/perf/util/intel-bts.c | 5
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-decoder.c | 2
-rw-r--r--  tools/perf/util/intel-pt.c | 22
-rw-r--r--  tools/perf/util/jitdump.c | 41
-rw-r--r--  tools/perf/util/jitdump.h | 3
-rw-r--r--  tools/perf/util/machine.c | 105
-rw-r--r--  tools/perf/util/machine.h | 7
-rw-r--r--  tools/perf/util/map.c | 16
-rw-r--r--  tools/perf/util/ordered-events.c | 9
-rw-r--r--  tools/perf/util/ordered-events.h | 1
-rw-r--r--  tools/perf/util/pmu.c | 23
-rw-r--r--  tools/perf/util/probe-event.c | 406
-rw-r--r--  tools/perf/util/probe-event.h | 5
-rw-r--r--  tools/perf/util/probe-file.c | 3
-rw-r--r--  tools/perf/util/probe-finder.c | 36
-rw-r--r--  tools/perf/util/python-ext-sources | 1
-rw-r--r--  tools/perf/util/quote.c | 36
-rw-r--r--  tools/perf/util/quote.h | 2
-rw-r--r--  tools/perf/util/rb_resort.h | 149
-rw-r--r--  tools/perf/util/record.c | 5
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c | 126
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 47
-rw-r--r--  tools/perf/util/session.c | 115
-rw-r--r--  tools/perf/util/session.h | 12
-rw-r--r--  tools/perf/util/sort.c | 35
-rw-r--r--  tools/perf/util/sort.h | 7
-rw-r--r--  tools/perf/util/stat.c | 4
-rw-r--r--  tools/perf/util/strbuf.c | 93
-rw-r--r--  tools/perf/util/strbuf.h | 25
-rw-r--r--  tools/perf/util/symbol-elf.c | 20
-rw-r--r--  tools/perf/util/symbol.c | 108
-rw-r--r--  tools/perf/util/symbol.h | 19
-rw-r--r--  tools/perf/util/symbol_fprintf.c | 71
-rw-r--r--  tools/perf/util/syscalltbl.c | 134
-rw-r--r--  tools/perf/util/syscalltbl.h | 20
-rw-r--r--  tools/perf/util/thread-stack.c | 139
-rw-r--r--  tools/perf/util/thread-stack.h | 31
-rw-r--r--  tools/perf/util/thread.c | 21
-rw-r--r--  tools/perf/util/thread.h | 8
-rw-r--r--  tools/perf/util/thread_map.c | 14
-rw-r--r--  tools/perf/util/thread_map.h | 3
-rw-r--r--  tools/perf/util/tool.h | 1
-rw-r--r--  tools/perf/util/trigger.h | 94
-rw-r--r--  tools/perf/util/tsc.h | 21
-rw-r--r--  tools/perf/util/unwind-libunwind.c | 25
-rw-r--r--  tools/perf/util/util.c | 38
-rw-r--r--  tools/perf/util/util.h | 15
-rw-r--r--  tools/perf/util/wrapper.c | 29
176 files changed, 5883 insertions, 2014 deletions
diff --git a/tools/Makefile b/tools/Makefile
index 60c7e6c8ff17..6bf68fe7dd29 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -137,7 +137,8 @@ libsubcmd_clean:
$(call descend,lib/subcmd,clean)
perf_clean:
- $(call descend,$(@:_clean=),clean)
+ $(Q)mkdir -p $(PERF_O) .
+ $(Q)$(MAKE) --no-print-directory -C perf O=$(PERF_O) subdir= clean
selftests_clean:
$(call descend,testing/$(@:_clean=),clean)
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 9f878619077a..57c8f98874e8 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -49,6 +49,10 @@ FEATURE_TESTS_BASIC := \
libslang \
libcrypto \
libunwind \
+ libunwind-x86 \
+ libunwind-x86_64 \
+ libunwind-arm \
+ libunwind-aarch64 \
pthread-attr-setaffinity-np \
stackprotector-all \
timerfd \
@@ -69,7 +73,9 @@ FEATURE_TESTS_EXTRA := \
libbabeltrace \
liberty \
liberty-z \
- libunwind-debug-frame
+ libunwind-debug-frame \
+ libunwind-debug-frame-arm \
+ libunwind-debug-frame-aarch64
FEATURE_TESTS ?= $(FEATURE_TESTS_BASIC)
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 4ae94dbfdab9..3d88f09e188b 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -27,6 +27,12 @@ FILES= \
test-libcrypto.bin \
test-libunwind.bin \
test-libunwind-debug-frame.bin \
+ test-libunwind-x86.bin \
+ test-libunwind-x86_64.bin \
+ test-libunwind-arm.bin \
+ test-libunwind-aarch64.bin \
+ test-libunwind-debug-frame-arm.bin \
+ test-libunwind-debug-frame-aarch64.bin \
test-pthread-attr-setaffinity-np.bin \
test-stackprotector-all.bin \
test-timerfd.bin \
@@ -103,6 +109,23 @@ $(OUTPUT)test-libunwind.bin:
$(OUTPUT)test-libunwind-debug-frame.bin:
$(BUILD) -lelf
+$(OUTPUT)test-libunwind-x86.bin:
+ $(BUILD) -lelf -lunwind-x86
+
+$(OUTPUT)test-libunwind-x86_64.bin:
+ $(BUILD) -lelf -lunwind-x86_64
+
+$(OUTPUT)test-libunwind-arm.bin:
+ $(BUILD) -lelf -lunwind-arm
+
+$(OUTPUT)test-libunwind-aarch64.bin:
+ $(BUILD) -lelf -lunwind-aarch64
+
+$(OUTPUT)test-libunwind-debug-frame-arm.bin:
+ $(BUILD) -lelf -lunwind-arm
+
+$(OUTPUT)test-libunwind-debug-frame-aarch64.bin:
+ $(BUILD) -lelf -lunwind-aarch64
$(OUTPUT)test-libaudit.bin:
$(BUILD) -laudit
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c
index b389026839b9..e04ab89a1013 100644
--- a/tools/build/feature/test-bpf.c
+++ b/tools/build/feature/test-bpf.c
@@ -27,10 +27,9 @@ int main(void)
attr.log_level = 0;
attr.kern_version = 0;
- attr = attr;
/*
* Test existence of __NR_bpf and BPF_PROG_LOAD.
* This call should fail if we run the testcase.
*/
- return syscall(__NR_bpf, BPF_PROG_LOAD, attr, sizeof(attr));
+ return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
diff --git a/tools/build/feature/test-libunwind-aarch64.c b/tools/build/feature/test-libunwind-aarch64.c
new file mode 100644
index 000000000000..fc03fb64e8c1
--- /dev/null
+++ b/tools/build/feature/test-libunwind-aarch64.c
@@ -0,0 +1,26 @@
+#include <libunwind-aarch64.h>
+#include <stdlib.h>
+
+extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+ unw_word_t ip,
+ unw_dyn_info_t *di,
+ unw_proc_info_t *pi,
+ int need_unwind_info, void *arg);
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+static unw_accessors_t accessors;
+
+int main(void)
+{
+ unw_addr_space_t addr_space;
+
+ addr_space = unw_create_addr_space(&accessors, 0);
+ if (addr_space)
+ return 0;
+
+ unw_init_remote(NULL, addr_space, NULL);
+ dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
+
+ return 0;
+}
diff --git a/tools/build/feature/test-libunwind-arm.c b/tools/build/feature/test-libunwind-arm.c
new file mode 100644
index 000000000000..632d95ec641f
--- /dev/null
+++ b/tools/build/feature/test-libunwind-arm.c
@@ -0,0 +1,27 @@
+#include <libunwind-arm.h>
+#include <stdlib.h>
+
+extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+ unw_word_t ip,
+ unw_dyn_info_t *di,
+ unw_proc_info_t *pi,
+ int need_unwind_info, void *arg);
+
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+static unw_accessors_t accessors;
+
+int main(void)
+{
+ unw_addr_space_t addr_space;
+
+ addr_space = unw_create_addr_space(&accessors, 0);
+ if (addr_space)
+ return 0;
+
+ unw_init_remote(NULL, addr_space, NULL);
+ dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
+
+ return 0;
+}
diff --git a/tools/build/feature/test-libunwind-debug-frame-aarch64.c b/tools/build/feature/test-libunwind-debug-frame-aarch64.c
new file mode 100644
index 000000000000..22844673fc26
--- /dev/null
+++ b/tools/build/feature/test-libunwind-debug-frame-aarch64.c
@@ -0,0 +1,16 @@
+#include <libunwind-aarch64.h>
+#include <stdlib.h>
+
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+ unw_word_t ip, unw_word_t segbase,
+ const char *obj_name, unw_word_t start,
+ unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
+int main(void)
+{
+ dwarf_find_debug_frame(0, NULL, 0, 0, NULL, 0, 0);
+ return 0;
+}
diff --git a/tools/build/feature/test-libunwind-debug-frame-arm.c b/tools/build/feature/test-libunwind-debug-frame-arm.c
new file mode 100644
index 000000000000..f98859684fee
--- /dev/null
+++ b/tools/build/feature/test-libunwind-debug-frame-arm.c
@@ -0,0 +1,16 @@
+#include <libunwind-arm.h>
+#include <stdlib.h>
+
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+ unw_word_t ip, unw_word_t segbase,
+ const char *obj_name, unw_word_t start,
+ unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
+int main(void)
+{
+ dwarf_find_debug_frame(0, NULL, 0, 0, NULL, 0, 0);
+ return 0;
+}
diff --git a/tools/build/feature/test-libunwind-x86.c b/tools/build/feature/test-libunwind-x86.c
new file mode 100644
index 000000000000..3561edce305e
--- /dev/null
+++ b/tools/build/feature/test-libunwind-x86.c
@@ -0,0 +1,27 @@
+#include <libunwind-x86.h>
+#include <stdlib.h>
+
+extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+ unw_word_t ip,
+ unw_dyn_info_t *di,
+ unw_proc_info_t *pi,
+ int need_unwind_info, void *arg);
+
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+static unw_accessors_t accessors;
+
+int main(void)
+{
+ unw_addr_space_t addr_space;
+
+ addr_space = unw_create_addr_space(&accessors, 0);
+ if (addr_space)
+ return 0;
+
+ unw_init_remote(NULL, addr_space, NULL);
+ dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
+
+ return 0;
+}
diff --git a/tools/build/feature/test-libunwind-x86_64.c b/tools/build/feature/test-libunwind-x86_64.c
new file mode 100644
index 000000000000..5add2517b2a1
--- /dev/null
+++ b/tools/build/feature/test-libunwind-x86_64.c
@@ -0,0 +1,27 @@
+#include <libunwind-x86_64.h>
+#include <stdlib.h>
+
+extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+ unw_word_t ip,
+ unw_dyn_info_t *di,
+ unw_proc_info_t *pi,
+ int need_unwind_info, void *arg);
+
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+static unw_accessors_t accessors;
+
+int main(void)
+{
+ unw_addr_space_t addr_space;
+
+ addr_space = unw_create_addr_space(&accessors, 0);
+ if (addr_space)
+ return 0;
+
+ unw_init_remote(NULL, addr_space, NULL);
+ dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
+
+ return 0;
+}
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index ef78c22ff44d..08556cf2c70d 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -351,6 +351,19 @@ int filename__read_str(const char *filename, char **buf, size_t *sizep)
return err;
}
+int procfs__read_str(const char *entry, char **buf, size_t *sizep)
+{
+ char path[PATH_MAX];
+ const char *procfs = procfs__mountpoint();
+
+ if (!procfs)
+ return -1;
+
+ snprintf(path, sizeof(path), "%s/%s", procfs, entry);
+
+ return filename__read_str(path, buf, sizep);
+}
+
int sysfs__read_ull(const char *entry, unsigned long long *value)
{
char path[PATH_MAX];
diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h
index 9f6598098dc5..16c9c2ed7c5b 100644
--- a/tools/lib/api/fs/fs.h
+++ b/tools/lib/api/fs/fs.h
@@ -29,6 +29,8 @@ int filename__read_int(const char *filename, int *value);
int filename__read_ull(const char *filename, unsigned long long *value);
int filename__read_str(const char *filename, char **buf, size_t *sizep);
+int procfs__read_str(const char *entry, char **buf, size_t *sizep);
+
int sysctl__read_int(const char *sysctl, int *value);
int sysfs__read_int(const char *entry, int *value);
int sysfs__read_ull(const char *entry, unsigned long long *value);
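A minimal usage sketch for the new procfs__read_str() helper; the proc entry name below is only an example, and the buffer is allocated by filename__read_str(), so the caller frees it:

#include <api/fs/fs.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical caller: dump a procfs file via the new helper. */
static void dump_proc_entry(const char *entry)
{
	char *buf = NULL;
	size_t size = 0;

	if (procfs__read_str(entry, &buf, &size) == 0) {
		printf("%.*s", (int)size, buf);
		free(buf);	/* buffer is malloc'ed by filename__read_str() */
	}
}

/* e.g. dump_proc_entry("sys/kernel/perf_event_max_stack"); */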
diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt
index be764f9ec769..c6c8318e38a2 100644
--- a/tools/perf/Documentation/intel-pt.txt
+++ b/tools/perf/Documentation/intel-pt.txt
@@ -672,6 +672,7 @@ The letters are:
d create a debug log
g synthesize a call chain (use with i or x)
l synthesize last branch entries (use with i or x)
+ s skip initial number of events
"Instructions" events look like they were recorded by "perf record -e
instructions".
@@ -730,6 +731,12 @@ from one sample to the next.
To disable trace decoding entirely, use the option --no-itrace.
+It is also possible to skip events generated (instructions, branches, transactions)
+at the beginning. This is useful to ignore initialization code.
+
+ --itrace=i0nss1000000
+
+skips the first million instructions.
dump option
-----------
diff --git a/tools/perf/Documentation/itrace.txt b/tools/perf/Documentation/itrace.txt
index 65453f4c7006..e2a4c5e0dbe5 100644
--- a/tools/perf/Documentation/itrace.txt
+++ b/tools/perf/Documentation/itrace.txt
@@ -7,6 +7,7 @@
d create a debug log
g synthesize a call chain (use with i or x)
l synthesize last branch entries (use with i or x)
+ s skip initial number of events
The default is all events i.e. the same as --itrace=ibxe
@@ -24,3 +25,10 @@
Also the number of last branch entries (default 64, max. 1024) for
instructions or transactions events can be specified.
+
+ It is also possible to skip events generated (instructions, branches, transactions)
+ at the beginning. This is useful to ignore initialization code.
+
+ --itrace=i0nss1000000
+
+ skips the first million instructions.
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index e9cd39a92dc2..778f54d4d0bd 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -33,7 +33,7 @@ OPTIONS
-f::
--force::
- Don't complain, do it.
+ Don't do ownership validation.
-v::
--verbose::
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index d1deb573877f..3e9490b9c533 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -75,7 +75,7 @@ OPTIONS
-f::
--force::
- Don't complain, do it.
+ Don't do ownership validation.
--symfs=<directory>::
Look for files with symbols relative to this directory.
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index ec723d0a5bb3..a126e97a8114 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -93,6 +93,67 @@ raw encoding of 0x1A8 can be used:
You should refer to the processor specific documentation for getting these
details. Some of them are referenced in the SEE ALSO section below.
+ARBITRARY PMUS
+--------------
+
+perf also supports an extended syntax for specifying raw parameters
+to PMUs. Using this typically requires looking up the specific event
+in the CPU vendor specific documentation.
+
+The available PMUs and their raw parameters can be listed with
+
+ ls /sys/devices/*/format
+
+For example, the "LSD.UOPS" core PMU raw event above could
+be specified as
+
+ perf stat -e cpu/event=0xa8,umask=0x1,name=LSD.UOPS_CYCLES,cmask=1/ ...
+
+PER SOCKET PMUS
+---------------
+
+Some PMUs are not associated with a core, but with a whole CPU socket.
+Events on these PMUs generally cannot be sampled, but only counted globally
+with perf stat -a. They can be bound to one logical CPU, but will measure
+all the CPUs in the same socket.
+
+This example measures memory bandwidth every second
+on the first memory controller on socket 0 of an Intel Xeon system
+
+ perf stat -C 0 -a uncore_imc_0/cas_count_read/,uncore_imc_0/cas_count_write/ -I 1000 ...
+
+Each memory controller has its own PMU. Measuring the complete system
+bandwidth would require specifying all imc PMUs (see perf list output),
+and adding the values together.
+
+This example measures the combined core power every second
+
+ perf stat -I 1000 -e power/energy-cores/ -a
+
+ACCESS RESTRICTIONS
+-------------------
+
+For non-root users, generally only context-switched PMU events are available.
+This is normally only the events in the cpu PMU, the predefined events
+like cycles and instructions and some software events.
+
+Other PMUs and global measurements are normally root only.
+Some event qualifiers, such as "any", are also root only.
+
+This can be overridden by setting the kernel.perf_event_paranoid
+sysctl to -1, which allows non-root users to use these events.
+
+For accessing trace point events perf needs to have read access to
+/sys/kernel/debug/tracing, even when perf_event_paranoid is in a relaxed
+setting.
+
+TRACING
+-------
+
+Some PMUs control advanced hardware tracing capabilities, such as Intel PT,
+that allow low-overhead execution tracing. These are described in a separate
+intel-pt.txt document.
+
PARAMETERIZED EVENTS
--------------------
@@ -106,6 +167,50 @@ also be supplied. For example:
perf stat -C 0 -e 'hv_gpci/dtbp_ptitc,phys_processor_idx=0x2/' ...
+EVENT GROUPS
+------------
+
+Perf supports time based multiplexing of events, when the number of events
+active exceeds the number of hardware performance counters. Multiplexing
+can cause measurement errors when the workload changes its execution
+profile.
+
+When metrics are computed using formulas from event counts, it is useful to
+ensure some events are always measured together as a group to minimize multiplexing
+errors. Event groups can be specified using { }.
+
+ perf stat -e '{instructions,cycles}' ...
+
+The number of available performance counters depends on the CPU. A group
+cannot contain more events than available counters.
+For example Intel Core CPUs typically have four generic performance counters
+for the core, plus three fixed counters for instructions, cycles and
+ref-cycles. Some special events have restrictions on which counter they
+can schedule, and may not support multiple instances in a single group.
+When too many events are specified in the group, none of them will
+be measured.
+
+Globally pinned events can limit the number of counters available for
+other groups. On x86 systems, the NMI watchdog pins a counter by default.
+The NMI watchdog can be disabled as root with
+
+ echo 0 > /proc/sys/kernel/nmi_watchdog
+
+Events from multiple different PMUs cannot be mixed in a group, with
+some exceptions for software events.
+
+LEADER SAMPLING
+---------------
+
+perf also supports group leader sampling using the :S specifier.
+
+ perf record -e '{cycles,instructions}:S' ...
+ perf report --group
+
+Normally all events in an event group sample, but with :S only
+the first event (the leader) samples, and it only reads the values of the
+other events in the group.
+
OPTIONS
-------
@@ -143,5 +248,5 @@ SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-top[1],
linkperf:perf-record[1],
-http://www.intel.com/Assets/PDF/manual/253669.pdf[Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide],
+http://www.intel.com/sdm/[Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide],
http://support.amd.com/us/Processor_TechDocs/24593_APM_v2.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming]
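For readers who want to see what the cpu/event=0xa8,umask=0x1/ syntax boils down to on Intel CPUs, here is an illustrative (not part of the patch) sketch using perf_event_open(2) directly with the equivalent raw config 0x1a8, i.e. (umask << 8) | event for simple events:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size     = sizeof(attr);
	attr.type     = PERF_TYPE_RAW;
	attr.config   = 0x1a8;		/* (umask << 8) | event: LSD.UOPS */
	attr.disabled = 1;

	/* count for the calling thread, on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload of interest ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("LSD.UOPS: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}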
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index 43310d8661fe..1d6092c460dd 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -48,6 +48,14 @@ OPTIONS
option can be passed in record mode. It will be interpreted the same way as perf
record.
+-K::
+--all-kernel::
+ Configure all used events to run in kernel space.
+
+-U::
+--all-user::
+ Configure all used events to run in user space.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 19aa17532a16..8dbee832abd9 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -347,6 +347,19 @@ Configure all used events to run in kernel space.
--all-user::
Configure all used events to run in user space.
+--timestamp-filename
+Append timestamp to output file name.
+
+--switch-output::
+Generate multiple perf.data files, timestamp prefixed, switching to a new one
+when receiving a SIGUSR2.
+
+A possible use case is to, given an external event, slice the perf.data file
+that gets then processed, possibly via a perf script, to decide if that
+particular perf.data snapshot should be kept or not.
+
+Implies --timestamp-filename, --no-buildid and --no-buildid-cache.
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 12113992ac9d..ebaf849e30ef 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -248,7 +248,7 @@ OPTIONS
Note that when using the --itrace option the synthesized callchain size
will override this value if the synthesized callchain size is bigger.
- Default: 127
+ Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
-G::
--inverted::
@@ -285,7 +285,7 @@ OPTIONS
-f::
--force::
- Don't complain, do it.
+ Don't do ownership validation.
--symfs=<directory>::
Look for files with symbols relative to this directory.
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index 8ff4df956951..1cc08cc47ac5 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -50,6 +50,22 @@ OPTIONS
--dump-raw-trace=::
Display verbose dump of the sched data.
+OPTIONS for 'perf sched map'
+----------------------------
+
+--compact::
+ Show only CPUs with activity. Helps visualizing on high core
+ count systems.
+
+--cpus::
+ Show just entries with activities for the given CPUs.
+
+--color-cpus::
+ Highlight the given cpus.
+
+--color-pids::
+ Highlight the given pids.
+
SEE ALSO
--------
linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 382ddfb45d1d..a856a1095893 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -259,9 +259,23 @@ include::itrace.txt[]
--full-source-path::
Show the full path for source files for srcline output.
+--max-stack::
+ Set the stack depth limit when parsing the callchain, anything
+ beyond the specified depth will be ignored. This is a trade-off
+ between information loss and faster processing especially for
+ workloads that can have a very long callchain stack.
+ Note that when using the --itrace option the synthesized callchain size
+ will override this value if the synthesized callchain size is bigger.
+
+ Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+
--ns::
Use 9 decimal places when displaying time (i.e. show the nanoseconds)
+-f::
+--force::
+ Don't do ownership validation.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script-perl[1],
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 19f046f027cd..91d638df3a6b 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -177,7 +177,7 @@ Default is to monitor all CPUS.
between information loss and faster processing especially for
workloads that can have a very long callchain stack.
- Default: 127
+ Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
--ignore-callees=<regex>::
Ignore callees of the function(s) matching the given regex.
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 13293de8869f..6afe20121bc0 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -117,9 +117,41 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
--syscalls::
Trace system calls. This option is enabled by default.
+--call-graph [mode,type,min[,limit],order[,key][,branch]]::
+ Setup and enable call-graph (stack chain/backtrace) recording.
+ See `--call-graph` section in perf-record and perf-report
+ man pages for details. The ones that are most useful in 'perf trace'
+ are 'dwarf' and 'lbr', where available, try: 'perf trace --call-graph dwarf'.
+
+ Using this will, for the root user, bump the value of --mmap-pages to 4
+ times the maximum for non-root users, based on the kernel.perf_event_mlock_kb
+ sysctl. This is done only if the user doesn't specify a --mmap-pages value.
+
+--kernel-syscall-graph::
+ Show the kernel callchains on the syscall exit path.
+
--event::
Trace other events, see 'perf list' for a complete list.
+--max-stack::
+ Set the stack depth limit when parsing the callchain, anything
+ beyond the specified depth will be ignored. Note that at this point
+ this is just about the presentation part, i.e. the kernel is still
+ not limiting, the overhead of callchains needs to be set via the
+ knobs in --call-graph dwarf.
+
+ Implies '--call-graph dwarf' when --call-graph not present on the
+ command line, on systems where DWARF unwinding was built in.
+
+ Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+
+--min-stack::
+ Set the stack depth limit when parsing the callchain, anything
+ below the specified depth will be ignored. Disabled by default.
+
+ Implies '--call-graph dwarf' when --call-graph not present on the
+ command line, on systems where DWARF unwinding was built in.
+
--proc-map-timeout::
When processing pre-existing threads /proc/XXX/mmap, it may take a long time,
because the file may be huge. A time out is needed in such cases.
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 000ea210389d..bde8cbae7dd9 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -183,6 +183,11 @@ endif
include config/Makefile
endif
+ifeq ($(config),0)
+include $(srctree)/tools/scripts/Makefile.arch
+-include arch/$(ARCH)/Makefile
+endif
+
# The FEATURE_DUMP_EXPORT holds location of the actual
# FEATURE_DUMP file to be used to bypass feature detection
# (for bpf or any other subproject)
@@ -297,8 +302,6 @@ endif
# because maintaining the nesting to match is a pain. If
# we had "elif" things would have been much nicer...
--include arch/$(ARCH)/Makefile
-
ifneq ($(OUTPUT),)
CFLAGS += -I$(OUTPUT)
endif
@@ -390,7 +393,7 @@ endif
__build-dir = $(subst $(OUTPUT),,$(dir $@))
build-dir = $(if $(__build-dir),$(__build-dir),.)
-prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h fixdep
+prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h fixdep archheaders
$(OUTPUT)%.o: %.c prepare FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
@@ -430,7 +433,7 @@ $(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
LIBPERF_IN := $(OUTPUT)libperf-in.o
-$(LIBPERF_IN): fixdep FORCE
+$(LIBPERF_IN): prepare fixdep FORCE
$(Q)$(MAKE) $(build)=libperf
$(LIB_FILE): $(LIBPERF_IN)
@@ -625,7 +628,7 @@ config-clean:
$(call QUIET_CLEAN, config)
$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ $(if $(OUTPUT),OUTPUT=$(OUTPUT)feature/,) clean >/dev/null
-clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean
+clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean
$(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
$(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)$(RM) $(OUTPUT).config-detected
@@ -662,5 +665,5 @@ FORCE:
.PHONY: all install clean config-clean strip install-gtk
.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE prepare
-.PHONY: libtraceevent_plugins
+.PHONY: libtraceevent_plugins archheaders
diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile
index 56e05f126ad8..cc3930904d68 100644
--- a/tools/perf/arch/powerpc/Makefile
+++ b/tools/perf/arch/powerpc/Makefile
@@ -3,4 +3,5 @@ PERF_HAVE_DWARF_REGS := 1
endif
HAVE_KVM_STAT_SUPPORT := 1
+PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
PERF_HAVE_JITDUMP := 1
diff --git a/tools/perf/arch/powerpc/util/dwarf-regs.c b/tools/perf/arch/powerpc/util/dwarf-regs.c
index 733151cdf46e..41bdf9530d82 100644
--- a/tools/perf/arch/powerpc/util/dwarf-regs.c
+++ b/tools/perf/arch/powerpc/util/dwarf-regs.c
@@ -10,19 +10,26 @@
*/
#include <stddef.h>
+#include <errno.h>
+#include <string.h>
#include <dwarf-regs.h>
-
+#include <linux/ptrace.h>
+#include <linux/kernel.h>
+#include "util.h"
struct pt_regs_dwarfnum {
const char *name;
unsigned int dwarfnum;
+ unsigned int ptregs_offset;
};
-#define STR(s) #s
-#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num}
-#define GPR_DWARFNUM_NAME(num) \
- {.name = STR(%gpr##num), .dwarfnum = num}
-#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0}
+#define REG_DWARFNUM_NAME(r, num) \
+ {.name = STR(%)STR(r), .dwarfnum = num, \
+ .ptregs_offset = offsetof(struct pt_regs, r)}
+#define GPR_DWARFNUM_NAME(num) \
+ {.name = STR(%gpr##num), .dwarfnum = num, \
+ .ptregs_offset = offsetof(struct pt_regs, gpr[num])}
+#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0, .ptregs_offset = 0}
/*
* Reference:
@@ -61,12 +68,12 @@ static const struct pt_regs_dwarfnum regdwarfnum_table[] = {
GPR_DWARFNUM_NAME(29),
GPR_DWARFNUM_NAME(30),
GPR_DWARFNUM_NAME(31),
- REG_DWARFNUM_NAME("%msr", 66),
- REG_DWARFNUM_NAME("%ctr", 109),
- REG_DWARFNUM_NAME("%link", 108),
- REG_DWARFNUM_NAME("%xer", 101),
- REG_DWARFNUM_NAME("%dar", 119),
- REG_DWARFNUM_NAME("%dsisr", 118),
+ REG_DWARFNUM_NAME(msr, 66),
+ REG_DWARFNUM_NAME(ctr, 109),
+ REG_DWARFNUM_NAME(link, 108),
+ REG_DWARFNUM_NAME(xer, 101),
+ REG_DWARFNUM_NAME(dar, 119),
+ REG_DWARFNUM_NAME(dsisr, 118),
REG_DWARFNUM_END,
};
@@ -86,3 +93,12 @@ const char *get_arch_regstr(unsigned int n)
return roff->name;
return NULL;
}
+
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_dwarfnum *roff;
+ for (roff = regdwarfnum_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->ptregs_offset;
+ return -EINVAL;
+}
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index bbc1a50768dd..c6d0f91731a1 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -19,12 +19,6 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
ehdr.e_type == ET_DYN;
}
-#if defined(_CALL_ELF) && _CALL_ELF == 2
-void arch__elf_sym_adjust(GElf_Sym *sym)
-{
- sym->st_value += PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
-}
-#endif
#endif
#if !defined(_CALL_ELF) || _CALL_ELF != 2
@@ -65,18 +59,45 @@ bool arch__prefers_symtab(void)
return true;
}
+#ifdef HAVE_LIBELF_SUPPORT
+void arch__sym_update(struct symbol *s, GElf_Sym *sym)
+{
+ s->arch_sym = sym->st_other;
+}
+#endif
+
#define PPC64LE_LEP_OFFSET 8
void arch__fix_tev_from_maps(struct perf_probe_event *pev,
- struct probe_trace_event *tev, struct map *map)
+ struct probe_trace_event *tev, struct map *map,
+ struct symbol *sym)
{
+ int lep_offset;
+
/*
- * ppc64 ABIv2 local entry point is currently always 2 instructions
- * (8 bytes) after the global entry point.
+ * When probing at a function entry point, we normally always want the
+ * LEP since that catches calls to the function through both the GEP and
+ * the LEP. Hence, we would like to probe at an offset of 8 bytes if
+ * the user only specified the function entry.
+ *
+ * However, if the user specifies an offset, we fall back to using the
+ * GEP since all userspace applications (objdump/readelf) show function
+ * disassembly with offsets from the GEP.
+ *
+ * In addition, we shouldn't specify an offset for kretprobes.
*/
- if (!pev->uprobes && map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
- tev->point.address += PPC64LE_LEP_OFFSET;
+ if (pev->point.offset || pev->point.retprobe || !map || !sym)
+ return;
+
+ lep_offset = PPC64_LOCAL_ENTRY_OFFSET(sym->arch_sym);
+
+ if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS)
tev->point.offset += PPC64LE_LEP_OFFSET;
+ else if (lep_offset) {
+ if (pev->uprobes)
+ tev->point.address += lep_offset;
+ else
+ tev->point.offset += lep_offset;
}
}
#endif
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index 269af2143735..6c9211b18ec0 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -4,3 +4,26 @@ endif
HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
PERF_HAVE_JITDUMP := 1
+
+###
+# Syscall table generation
+#
+
+out := $(OUTPUT)arch/x86/include/generated/asm
+header := $(out)/syscalls_64.c
+sys := $(srctree)/tools/perf/arch/x86/entry/syscalls
+systbl := $(sys)/syscalltbl.sh
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
+
+$(header): $(sys)/syscall_64.tbl $(systbl)
+ @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
+ (diff -B arch/x86/entry/syscalls/syscall_64.tbl ../../arch/x86/entry/syscalls/syscall_64.tbl >/dev/null) \
+ || echo "Warning: x86_64's syscall_64.tbl differs from kernel" >&2 )) || true
+ $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@
+
+clean::
+ $(call QUIET_CLEAN, x86) $(RM) $(header)
+
+archheaders: $(header)
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
new file mode 100644
index 000000000000..cac6d17ce5db
--- /dev/null
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -0,0 +1,376 @@
+#
+# 64-bit system call numbers and entry vectors
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The abi is "common", "64" or "x32" for this file.
+#
+0 common read sys_read
+1 common write sys_write
+2 common open sys_open
+3 common close sys_close
+4 common stat sys_newstat
+5 common fstat sys_newfstat
+6 common lstat sys_newlstat
+7 common poll sys_poll
+8 common lseek sys_lseek
+9 common mmap sys_mmap
+10 common mprotect sys_mprotect
+11 common munmap sys_munmap
+12 common brk sys_brk
+13 64 rt_sigaction sys_rt_sigaction
+14 common rt_sigprocmask sys_rt_sigprocmask
+15 64 rt_sigreturn sys_rt_sigreturn/ptregs
+16 64 ioctl sys_ioctl
+17 common pread64 sys_pread64
+18 common pwrite64 sys_pwrite64
+19 64 readv sys_readv
+20 64 writev sys_writev
+21 common access sys_access
+22 common pipe sys_pipe
+23 common select sys_select
+24 common sched_yield sys_sched_yield
+25 common mremap sys_mremap
+26 common msync sys_msync
+27 common mincore sys_mincore
+28 common madvise sys_madvise
+29 common shmget sys_shmget
+30 common shmat sys_shmat
+31 common shmctl sys_shmctl
+32 common dup sys_dup
+33 common dup2 sys_dup2
+34 common pause sys_pause
+35 common nanosleep sys_nanosleep
+36 common getitimer sys_getitimer
+37 common alarm sys_alarm
+38 common setitimer sys_setitimer
+39 common getpid sys_getpid
+40 common sendfile sys_sendfile64
+41 common socket sys_socket
+42 common connect sys_connect
+43 common accept sys_accept
+44 common sendto sys_sendto
+45 64 recvfrom sys_recvfrom
+46 64 sendmsg sys_sendmsg
+47 64 recvmsg sys_recvmsg
+48 common shutdown sys_shutdown
+49 common bind sys_bind
+50 common listen sys_listen
+51 common getsockname sys_getsockname
+52 common getpeername sys_getpeername
+53 common socketpair sys_socketpair
+54 64 setsockopt sys_setsockopt
+55 64 getsockopt sys_getsockopt
+56 common clone sys_clone/ptregs
+57 common fork sys_fork/ptregs
+58 common vfork sys_vfork/ptregs
+59 64 execve sys_execve/ptregs
+60 common exit sys_exit
+61 common wait4 sys_wait4
+62 common kill sys_kill
+63 common uname sys_newuname
+64 common semget sys_semget
+65 common semop sys_semop
+66 common semctl sys_semctl
+67 common shmdt sys_shmdt
+68 common msgget sys_msgget
+69 common msgsnd sys_msgsnd
+70 common msgrcv sys_msgrcv
+71 common msgctl sys_msgctl
+72 common fcntl sys_fcntl
+73 common flock sys_flock
+74 common fsync sys_fsync
+75 common fdatasync sys_fdatasync
+76 common truncate sys_truncate
+77 common ftruncate sys_ftruncate
+78 common getdents sys_getdents
+79 common getcwd sys_getcwd
+80 common chdir sys_chdir
+81 common fchdir sys_fchdir
+82 common rename sys_rename
+83 common mkdir sys_mkdir
+84 common rmdir sys_rmdir
+85 common creat sys_creat
+86 common link sys_link
+87 common unlink sys_unlink
+88 common symlink sys_symlink
+89 common readlink sys_readlink
+90 common chmod sys_chmod
+91 common fchmod sys_fchmod
+92 common chown sys_chown
+93 common fchown sys_fchown
+94 common lchown sys_lchown
+95 common umask sys_umask
+96 common gettimeofday sys_gettimeofday
+97 common getrlimit sys_getrlimit
+98 common getrusage sys_getrusage
+99 common sysinfo sys_sysinfo
+100 common times sys_times
+101 64 ptrace sys_ptrace
+102 common getuid sys_getuid
+103 common syslog sys_syslog
+104 common getgid sys_getgid
+105 common setuid sys_setuid
+106 common setgid sys_setgid
+107 common geteuid sys_geteuid
+108 common getegid sys_getegid
+109 common setpgid sys_setpgid
+110 common getppid sys_getppid
+111 common getpgrp sys_getpgrp
+112 common setsid sys_setsid
+113 common setreuid sys_setreuid
+114 common setregid sys_setregid
+115 common getgroups sys_getgroups
+116 common setgroups sys_setgroups
+117 common setresuid sys_setresuid
+118 common getresuid sys_getresuid
+119 common setresgid sys_setresgid
+120 common getresgid sys_getresgid
+121 common getpgid sys_getpgid
+122 common setfsuid sys_setfsuid
+123 common setfsgid sys_setfsgid
+124 common getsid sys_getsid
+125 common capget sys_capget
+126 common capset sys_capset
+127 64 rt_sigpending sys_rt_sigpending
+128 64 rt_sigtimedwait sys_rt_sigtimedwait
+129 64 rt_sigqueueinfo sys_rt_sigqueueinfo
+130 common rt_sigsuspend sys_rt_sigsuspend
+131 64 sigaltstack sys_sigaltstack
+132 common utime sys_utime
+133 common mknod sys_mknod
+134 64 uselib
+135 common personality sys_personality
+136 common ustat sys_ustat
+137 common statfs sys_statfs
+138 common fstatfs sys_fstatfs
+139 common sysfs sys_sysfs
+140 common getpriority sys_getpriority
+141 common setpriority sys_setpriority
+142 common sched_setparam sys_sched_setparam
+143 common sched_getparam sys_sched_getparam
+144 common sched_setscheduler sys_sched_setscheduler
+145 common sched_getscheduler sys_sched_getscheduler
+146 common sched_get_priority_max sys_sched_get_priority_max
+147 common sched_get_priority_min sys_sched_get_priority_min
+148 common sched_rr_get_interval sys_sched_rr_get_interval
+149 common mlock sys_mlock
+150 common munlock sys_munlock
+151 common mlockall sys_mlockall
+152 common munlockall sys_munlockall
+153 common vhangup sys_vhangup
+154 common modify_ldt sys_modify_ldt
+155 common pivot_root sys_pivot_root
+156 64 _sysctl sys_sysctl
+157 common prctl sys_prctl
+158 common arch_prctl sys_arch_prctl
+159 common adjtimex sys_adjtimex
+160 common setrlimit sys_setrlimit
+161 common chroot sys_chroot
+162 common sync sys_sync
+163 common acct sys_acct
+164 common settimeofday sys_settimeofday
+165 common mount sys_mount
+166 common umount2 sys_umount
+167 common swapon sys_swapon
+168 common swapoff sys_swapoff
+169 common reboot sys_reboot
+170 common sethostname sys_sethostname
+171 common setdomainname sys_setdomainname
+172 common iopl sys_iopl/ptregs
+173 common ioperm sys_ioperm
+174 64 create_module
+175 common init_module sys_init_module
+176 common delete_module sys_delete_module
+177 64 get_kernel_syms
+178 64 query_module
+179 common quotactl sys_quotactl
+180 64 nfsservctl
+181 common getpmsg
+182 common putpmsg
+183 common afs_syscall
+184 common tuxcall
+185 common security
+186 common gettid sys_gettid
+187 common readahead sys_readahead
+188 common setxattr sys_setxattr
+189 common lsetxattr sys_lsetxattr
+190 common fsetxattr sys_fsetxattr
+191 common getxattr sys_getxattr
+192 common lgetxattr sys_lgetxattr
+193 common fgetxattr sys_fgetxattr
+194 common listxattr sys_listxattr
+195 common llistxattr sys_llistxattr
+196 common flistxattr sys_flistxattr
+197 common removexattr sys_removexattr
+198 common lremovexattr sys_lremovexattr
+199 common fremovexattr sys_fremovexattr
+200 common tkill sys_tkill
+201 common time sys_time
+202 common futex sys_futex
+203 common sched_setaffinity sys_sched_setaffinity
+204 common sched_getaffinity sys_sched_getaffinity
+205 64 set_thread_area
+206 64 io_setup sys_io_setup
+207 common io_destroy sys_io_destroy
+208 common io_getevents sys_io_getevents
+209 64 io_submit sys_io_submit
+210 common io_cancel sys_io_cancel
+211 64 get_thread_area
+212 common lookup_dcookie sys_lookup_dcookie
+213 common epoll_create sys_epoll_create
+214 64 epoll_ctl_old
+215 64 epoll_wait_old
+216 common remap_file_pages sys_remap_file_pages
+217 common getdents64 sys_getdents64
+218 common set_tid_address sys_set_tid_address
+219 common restart_syscall sys_restart_syscall
+220 common semtimedop sys_semtimedop
+221 common fadvise64 sys_fadvise64
+222 64 timer_create sys_timer_create
+223 common timer_settime sys_timer_settime
+224 common timer_gettime sys_timer_gettime
+225 common timer_getoverrun sys_timer_getoverrun
+226 common timer_delete sys_timer_delete
+227 common clock_settime sys_clock_settime
+228 common clock_gettime sys_clock_gettime
+229 common clock_getres sys_clock_getres
+230 common clock_nanosleep sys_clock_nanosleep
+231 common exit_group sys_exit_group
+232 common epoll_wait sys_epoll_wait
+233 common epoll_ctl sys_epoll_ctl
+234 common tgkill sys_tgkill
+235 common utimes sys_utimes
+236 64 vserver
+237 common mbind sys_mbind
+238 common set_mempolicy sys_set_mempolicy
+239 common get_mempolicy sys_get_mempolicy
+240 common mq_open sys_mq_open
+241 common mq_unlink sys_mq_unlink
+242 common mq_timedsend sys_mq_timedsend
+243 common mq_timedreceive sys_mq_timedreceive
+244 64 mq_notify sys_mq_notify
+245 common mq_getsetattr sys_mq_getsetattr
+246 64 kexec_load sys_kexec_load
+247 64 waitid sys_waitid
+248 common add_key sys_add_key
+249 common request_key sys_request_key
+250 common keyctl sys_keyctl
+251 common ioprio_set sys_ioprio_set
+252 common ioprio_get sys_ioprio_get
+253 common inotify_init sys_inotify_init
+254 common inotify_add_watch sys_inotify_add_watch
+255 common inotify_rm_watch sys_inotify_rm_watch
+256 common migrate_pages sys_migrate_pages
+257 common openat sys_openat
+258 common mkdirat sys_mkdirat
+259 common mknodat sys_mknodat
+260 common fchownat sys_fchownat
+261 common futimesat sys_futimesat
+262 common newfstatat sys_newfstatat
+263 common unlinkat sys_unlinkat
+264 common renameat sys_renameat
+265 common linkat sys_linkat
+266 common symlinkat sys_symlinkat
+267 common readlinkat sys_readlinkat
+268 common fchmodat sys_fchmodat
+269 common faccessat sys_faccessat
+270 common pselect6 sys_pselect6
+271 common ppoll sys_ppoll
+272 common unshare sys_unshare
+273 64 set_robust_list sys_set_robust_list
+274 64 get_robust_list sys_get_robust_list
+275 common splice sys_splice
+276 common tee sys_tee
+277 common sync_file_range sys_sync_file_range
+278 64 vmsplice sys_vmsplice
+279 64 move_pages sys_move_pages
+280 common utimensat sys_utimensat
+281 common epoll_pwait sys_epoll_pwait
+282 common signalfd sys_signalfd
+283 common timerfd_create sys_timerfd_create
+284 common eventfd sys_eventfd
+285 common fallocate sys_fallocate
+286 common timerfd_settime sys_timerfd_settime
+287 common timerfd_gettime sys_timerfd_gettime
+288 common accept4 sys_accept4
+289 common signalfd4 sys_signalfd4
+290 common eventfd2 sys_eventfd2
+291 common epoll_create1 sys_epoll_create1
+292 common dup3 sys_dup3
+293 common pipe2 sys_pipe2
+294 common inotify_init1 sys_inotify_init1
+295 64 preadv sys_preadv
+296 64 pwritev sys_pwritev
+297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+298 common perf_event_open sys_perf_event_open
+299 64 recvmmsg sys_recvmmsg
+300 common fanotify_init sys_fanotify_init
+301 common fanotify_mark sys_fanotify_mark
+302 common prlimit64 sys_prlimit64
+303 common name_to_handle_at sys_name_to_handle_at
+304 common open_by_handle_at sys_open_by_handle_at
+305 common clock_adjtime sys_clock_adjtime
+306 common syncfs sys_syncfs
+307 64 sendmmsg sys_sendmmsg
+308 common setns sys_setns
+309 common getcpu sys_getcpu
+310 64 process_vm_readv sys_process_vm_readv
+311 64 process_vm_writev sys_process_vm_writev
+312 common kcmp sys_kcmp
+313 common finit_module sys_finit_module
+314 common sched_setattr sys_sched_setattr
+315 common sched_getattr sys_sched_getattr
+316 common renameat2 sys_renameat2
+317 common seccomp sys_seccomp
+318 common getrandom sys_getrandom
+319 common memfd_create sys_memfd_create
+320 common kexec_file_load sys_kexec_file_load
+321 common bpf sys_bpf
+322 64 execveat sys_execveat/ptregs
+323 common userfaultfd sys_userfaultfd
+324 common membarrier sys_membarrier
+325 common mlock2 sys_mlock2
+326 common copy_file_range sys_copy_file_range
+327 64 preadv2 sys_preadv2
+328 64 pwritev2 sys_pwritev2
+
+#
+# x32-specific system call numbers start at 512 to avoid cache impact
+# for native 64-bit operation.
+#
+512 x32 rt_sigaction compat_sys_rt_sigaction
+513 x32 rt_sigreturn sys32_x32_rt_sigreturn
+514 x32 ioctl compat_sys_ioctl
+515 x32 readv compat_sys_readv
+516 x32 writev compat_sys_writev
+517 x32 recvfrom compat_sys_recvfrom
+518 x32 sendmsg compat_sys_sendmsg
+519 x32 recvmsg compat_sys_recvmsg
+520 x32 execve compat_sys_execve/ptregs
+521 x32 ptrace compat_sys_ptrace
+522 x32 rt_sigpending compat_sys_rt_sigpending
+523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait
+524 x32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+525 x32 sigaltstack compat_sys_sigaltstack
+526 x32 timer_create compat_sys_timer_create
+527 x32 mq_notify compat_sys_mq_notify
+528 x32 kexec_load compat_sys_kexec_load
+529 x32 waitid compat_sys_waitid
+530 x32 set_robust_list compat_sys_set_robust_list
+531 x32 get_robust_list compat_sys_get_robust_list
+532 x32 vmsplice compat_sys_vmsplice
+533 x32 move_pages compat_sys_move_pages
+534 x32 preadv compat_sys_preadv64
+535 x32 pwritev compat_sys_pwritev64
+536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+537 x32 recvmmsg compat_sys_recvmmsg
+538 x32 sendmmsg compat_sys_sendmmsg
+539 x32 process_vm_readv compat_sys_process_vm_readv
+540 x32 process_vm_writev compat_sys_process_vm_writev
+541 x32 setsockopt compat_sys_setsockopt
+542 x32 getsockopt compat_sys_getsockopt
+543 x32 io_setup compat_sys_io_setup
+544 x32 io_submit compat_sys_io_submit
+545 x32 execveat compat_sys_execveat/ptregs
diff --git a/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh b/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh
new file mode 100755
index 000000000000..49a18b9ad9cf
--- /dev/null
+++ b/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+in="$1"
+arch="$2"
+
+syscall_macro() {
+ nr="$1"
+ name="$2"
+
+ echo " [$nr] = \"$name\","
+}
+
+emit() {
+ nr="$1"
+ entry="$2"
+
+ syscall_macro "$nr" "$entry"
+}
+
+echo "static const char *syscalltbl_${arch}[] = {"
+
+sorted_table=$(mktemp /tmp/syscalltbl.XXXXXX)
+grep '^[0-9]' "$in" | sort -n > $sorted_table
+
+max_nr=0
+while read nr abi name entry compat; do
+	if [ $nr -ge 512 ] ; then # discard compat syscalls
+ break
+ fi
+
+ emit "$nr" "$name"
+ max_nr=$nr
+done < $sorted_table
+
+rm -f $sorted_table
+
+echo "};"
+
+echo "#define SYSCALLTBL_${arch}_MAX_ID ${max_nr}"
diff --git a/tools/perf/arch/x86/tests/perf-time-to-tsc.c b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
index 9d29ee283ac5..d4aa567a29c4 100644
--- a/tools/perf/arch/x86/tests/perf-time-to-tsc.c
+++ b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
@@ -71,7 +71,7 @@ int test__perf_time_to_tsc(int subtest __maybe_unused)
CHECK__(parse_events(evlist, "cycles:u", NULL));
- perf_evlist__config(evlist, &opts);
+ perf_evlist__config(evlist, &opts, NULL);
evsel = perf_evlist__first(evlist);
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index d66f9ad4df2e..7dc30637cf66 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -438,6 +438,11 @@ struct auxtrace_record *intel_bts_recording_init(int *err)
if (!intel_bts_pmu)
return NULL;
+ if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
+ *err = -errno;
+ return NULL;
+ }
+
btsr = zalloc(sizeof(struct intel_bts_recording));
if (!btsr) {
*err = -ENOMEM;
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index a3395179c9ee..a07b9605e93b 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -1027,6 +1027,11 @@ struct auxtrace_record *intel_pt_recording_init(int *err)
if (!intel_pt_pmu)
return NULL;
+ if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
+ *err = -errno;
+ return NULL;
+ }
+
ptr = zalloc(sizeof(struct intel_pt_recording));
if (!ptr) {
*err = -ENOMEM;
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
index fd2868490d00..357f1b13b5ae 100644
--- a/tools/perf/arch/x86/util/tsc.c
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -7,7 +7,6 @@
#include <linux/types.h>
#include "../../util/debug.h"
#include "../../util/tsc.h"
-#include "tsc.h"
int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
struct perf_tsc_conversion *tc)
@@ -46,3 +45,34 @@ u64 rdtsc(void)
return low | ((u64)high) << 32;
}
+
+int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc,
+ struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ union perf_event event = {
+ .time_conv = {
+ .header = {
+ .type = PERF_RECORD_TIME_CONV,
+ .size = sizeof(struct time_conv_event),
+ },
+ },
+ };
+ struct perf_tsc_conversion tc;
+ int err;
+
+ err = perf_read_tsc_conversion(pc, &tc);
+ if (err == -EOPNOTSUPP)
+ return 0;
+ if (err)
+ return err;
+
+ pr_debug2("Synthesizing TSC conversion information\n");
+
+ event.time_conv.time_mult = tc.time_mult;
+ event.time_conv.time_shift = tc.time_shift;
+ event.time_conv.time_zero = tc.time_zero;
+
+ return process(tool, &event, NULL, machine);
+}
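PERF_RECORD_TIME_CONV simply ships the three parameters (time_shift, time_mult, time_zero) a consumer needs to turn raw TSC values into perf timestamps; the conversion mirrors tsc_to_perf_time() in tools/perf/util/tsc.c, roughly time = time_zero + (tsc >> time_shift) * time_mult + (((tsc & ((1 << time_shift) - 1)) * time_mult) >> time_shift).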
diff --git a/tools/perf/arch/x86/util/tsc.h b/tools/perf/arch/x86/util/tsc.h
deleted file mode 100644
index 2edc4d31065c..000000000000
--- a/tools/perf/arch/x86/util/tsc.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef TOOLS_PERF_ARCH_X86_UTIL_TSC_H__
-#define TOOLS_PERF_ARCH_X86_UTIL_TSC_H__
-
-#include <linux/types.h>
-
-struct perf_tsc_conversion {
- u16 time_shift;
- u32 time_mult;
- u64 time_zero;
-};
-
-struct perf_event_mmap_page;
-
-int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
- struct perf_tsc_conversion *tc);
-
-#endif /* TOOLS_PERF_ARCH_X86_UTIL_TSC_H__ */
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
index 6a18ce21f865..6952db65508a 100644
--- a/tools/perf/bench/futex-lock-pi.c
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -83,7 +83,7 @@ static void *workerfn(void *arg)
do {
int ret;
again:
- ret = futex_lock_pi(w->futex, NULL, 0, futex_flag);
+ ret = futex_lock_pi(w->futex, NULL, futex_flag);
if (ret) { /* handle lock acquisition */
if (!silent)
diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
index d44de9f44281..b2e06d1190d0 100644
--- a/tools/perf/bench/futex.h
+++ b/tools/perf/bench/futex.h
@@ -57,13 +57,11 @@ futex_wake(u_int32_t *uaddr, int nr_wake, int opflags)
/**
* futex_lock_pi() - block on uaddr as a PI mutex
- * @detect: whether (1) or not (0) to perform deadlock detection
*/
static inline int
-futex_lock_pi(u_int32_t *uaddr, struct timespec *timeout, int detect,
- int opflags)
+futex_lock_pi(u_int32_t *uaddr, struct timespec *timeout, int opflags)
{
- return futex(uaddr, FUTEX_LOCK_PI, detect, timeout, NULL, 0, opflags);
+ return futex(uaddr, FUTEX_LOCK_PI, 0, timeout, NULL, 0, opflags);
}
/**
diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
index a91aa85d80ff..2b54d0f2672a 100644
--- a/tools/perf/bench/mem-functions.c
+++ b/tools/perf/bench/mem-functions.c
@@ -6,6 +6,7 @@
* Written by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
*/
+#include "debug.h"
#include "../perf.h"
#include "../util/util.h"
#include <subcmd/parse-options.h>
@@ -63,14 +64,16 @@ static struct perf_event_attr cycle_attr = {
.config = PERF_COUNT_HW_CPU_CYCLES
};
-static void init_cycles(void)
+static int init_cycles(void)
{
cycles_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, perf_event_open_cloexec_flag());
- if (cycles_fd < 0 && errno == ENOSYS)
- die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
- else
- BUG_ON(cycles_fd < 0);
+ if (cycles_fd < 0 && errno == ENOSYS) {
+ pr_debug("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
+ return -1;
+ }
+
+ return cycles_fd;
}
static u64 get_cycles(void)
@@ -155,8 +158,13 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *
argc = parse_options(argc, argv, options, info->usage, 0);
- if (use_cycles)
- init_cycles();
+ if (use_cycles) {
+ i = init_cycles();
+ if (i < 0) {
+ fprintf(stderr, "Failed to open cycles counter\n");
+ return i;
+ }
+ }
size = (size_t)perf_atoll((char *)size_str);
size_total = (double)size * nr_loops;
diff --git a/tools/perf/builtin-config.c b/tools/perf/builtin-config.c
index c42448ed5dfe..fe1b77fa21f9 100644
--- a/tools/perf/builtin-config.c
+++ b/tools/perf/builtin-config.c
@@ -12,6 +12,7 @@
#include <subcmd/parse-options.h>
#include "util/util.h"
#include "util/debug.h"
+#include "util/config.h"
static bool use_system_config, use_user_config;
@@ -32,13 +33,28 @@ static struct option config_options[] = {
OPT_END()
};
-static int show_config(const char *key, const char *value,
- void *cb __maybe_unused)
+static int show_config(struct perf_config_set *set)
{
- if (value)
- printf("%s=%s\n", key, value);
- else
- printf("%s\n", key);
+ struct perf_config_section *section;
+ struct perf_config_item *item;
+ struct list_head *sections;
+
+ if (set == NULL)
+ return -1;
+
+ sections = &set->sections;
+ if (list_empty(sections))
+ return -1;
+
+ list_for_each_entry(section, sections, node) {
+ list_for_each_entry(item, &section->items, node) {
+ char *value = item->value;
+
+ if (value)
+ printf("%s.%s=%s\n", section->name,
+ item->name, value);
+ }
+ }
return 0;
}
@@ -46,6 +62,7 @@ static int show_config(const char *key, const char *value,
int cmd_config(int argc, const char **argv, const char *prefix __maybe_unused)
{
int ret = 0;
+ struct perf_config_set *set;
char *user_config = mkpath("%s/.perfconfig", getenv("HOME"));
argc = parse_options(argc, argv, config_options, config_usage,
@@ -63,13 +80,19 @@ int cmd_config(int argc, const char **argv, const char *prefix __maybe_unused)
else if (use_user_config)
config_exclusive_filename = user_config;
+ set = perf_config_set__new();
+ if (!set) {
+ ret = -1;
+ goto out_err;
+ }
+
switch (actions) {
case ACTION_LIST:
if (argc) {
pr_err("Error: takes no arguments\n");
parse_options_usage(config_usage, config_options, "l", 1);
} else {
- ret = perf_config(show_config, NULL);
+ ret = show_config(set);
if (ret < 0) {
const char * config_filename = config_exclusive_filename;
if (!config_exclusive_filename)
@@ -83,5 +106,7 @@ int cmd_config(int argc, const char **argv, const char *prefix __maybe_unused)
usage_with_options(config_usage, config_options);
}
+ perf_config_set__delete(set);
+out_err:
return ret;
}
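With show_config() walking the perf_config_set, 'perf config --list' now prints fully qualified section.name=value pairs. A sketch of the output, assuming a ~/.perfconfig holding a couple of entries:

    $ perf config --list
    ui.show-headers=true
    report.children=false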
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 8053a8ceefda..9ce354f469dc 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -428,7 +428,7 @@ static void hists__baseline_only(struct hists *hists)
struct rb_root *root;
struct rb_node *next;
- if (sort__need_collapse)
+ if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
@@ -450,7 +450,7 @@ static void hists__precompute(struct hists *hists)
struct rb_root *root;
struct rb_node *next;
- if (sort__need_collapse)
+ if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index bc1de9b8fd67..f9830c902b78 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -61,6 +61,7 @@ static int check_emacsclient_version(void)
struct child_process ec_process;
const char *argv_ec[] = { "emacsclient", "--version", NULL };
int version;
+ int ret = -1;
/* emacsclient prints its version number on stderr */
memset(&ec_process, 0, sizeof(ec_process));
@@ -71,7 +72,10 @@ static int check_emacsclient_version(void)
fprintf(stderr, "Failed to start emacsclient.\n");
return -1;
}
- strbuf_read(&buffer, ec_process.err, 20);
+ if (strbuf_read(&buffer, ec_process.err, 20) < 0) {
+ fprintf(stderr, "Failed to read emacsclient version\n");
+ goto out;
+ }
close(ec_process.err);
/*
@@ -82,8 +86,7 @@ static int check_emacsclient_version(void)
if (prefixcmp(buffer.buf, "emacsclient")) {
fprintf(stderr, "Failed to parse emacsclient version.\n");
- strbuf_release(&buffer);
- return -1;
+ goto out;
}
version = atoi(buffer.buf + strlen("emacsclient"));
@@ -92,12 +95,11 @@ static int check_emacsclient_version(void)
fprintf(stderr,
"emacsclient version '%d' too old (< 22).\n",
version);
- strbuf_release(&buffer);
- return -1;
- }
-
+ } else
+ ret = 0;
+out:
strbuf_release(&buffer);
- return 0;
+ return ret;
}
static void exec_woman_emacs(const char *path, const char *page)
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index d1a2d104f2bc..e5afa8fe1bf1 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -748,6 +748,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
.auxtrace_info = perf_event__repipe_op2_synth,
.auxtrace = perf_event__repipe_auxtrace,
.auxtrace_error = perf_event__repipe_op2_synth,
+ .time_conv = perf_event__repipe_op2_synth,
.finished_round = perf_event__repipe_oe_synth,
.build_id = perf_event__repipe_op2_synth,
.id_index = perf_event__repipe_op2_synth,
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index c9cb3be47cff..58adfee230de 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -375,7 +375,7 @@ static u64 find_callsite(struct perf_evsel *evsel, struct perf_sample *sample)
}
al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
- sample__resolve_callchain(sample, NULL, evsel, &al, 16);
+ sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16);
callchain_cursor_commit(&callchain_cursor);
while (true) {
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index bff666458b28..6487c06d2708 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -982,7 +982,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
struct perf_evlist *evlist = kvm->evlist;
char sbuf[STRERR_BUFSIZE];
- perf_evlist__config(evlist, &kvm->opts);
+ perf_evlist__config(evlist, &kvm->opts, NULL);
/*
* Note: exclude_{guest,host} do not apply here.
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 85db3be4b3cb..1dc140c5481d 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -62,19 +62,22 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
int rec_argc, i = 0, j;
const char **rec_argv;
int ret;
+ bool all_user = false, all_kernel = false;
struct option options[] = {
OPT_CALLBACK('e', "event", &mem, "event",
"event selector. use 'perf mem record -e list' to list available events",
parse_record_events),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
+ OPT_BOOLEAN('U', "all-user", &all_user, "collect only user level data"),
+ OPT_BOOLEAN('K', "all-kernel", &all_kernel, "collect only kernel level data"),
OPT_END()
};
argc = parse_options(argc, argv, options, record_mem_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- rec_argc = argc + 7; /* max number of arguments */
+ rec_argc = argc + 9; /* max number of arguments */
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -1;
@@ -103,6 +106,12 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
rec_argv[i++] = perf_mem_events__name(j);
};
+ if (all_user)
+ rec_argv[i++] = "--all-user";
+
+ if (all_kernel)
+ rec_argv[i++] = "--all-kernel";
+
for (j = 0; j < argc; j++, i++)
rec_argv[i] = argv[j];
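The two new flags are simply forwarded to the underlying 'perf record' invocation. A usage sketch (the workload path is illustrative):

    perf mem record --all-user -- ./workload    # sample user-level accesses only
    perf mem record -K -- ./workload            # sample kernel-level accesses only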
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 515510ecc76a..f3679c44d3f3 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -29,10 +29,12 @@
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
+#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
+#include "util/trigger.h"
#include "asm/bug.h"
#include <unistd.h>
@@ -55,6 +57,8 @@ struct record {
bool no_buildid_cache;
bool no_buildid_cache_set;
bool buildid_all;
+ bool timestamp_filename;
+ bool switch_output;
unsigned long long samples;
};
@@ -124,9 +128,10 @@ out:
static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;
-static volatile int auxtrace_snapshot_enabled;
-static volatile int auxtrace_snapshot_err;
+
static volatile int auxtrace_record__snapshot_started;
+static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
+static DEFINE_TRIGGER(switch_output_trigger);
static void sig_handler(int sig)
{
@@ -244,11 +249,12 @@ static void record__read_auxtrace_snapshot(struct record *rec)
{
pr_debug("Recording AUX area tracing snapshot\n");
if (record__auxtrace_read_snapshot_all(rec) < 0) {
- auxtrace_snapshot_err = -1;
+ trigger_error(&auxtrace_snapshot_trigger);
} else {
- auxtrace_snapshot_err = auxtrace_record__snapshot_finish(rec->itr);
- if (!auxtrace_snapshot_err)
- auxtrace_snapshot_enabled = 1;
+ if (auxtrace_record__snapshot_finish(rec->itr))
+ trigger_error(&auxtrace_snapshot_trigger);
+ else
+ trigger_ready(&auxtrace_snapshot_trigger);
}
}
@@ -283,7 +289,7 @@ static int record__open(struct record *rec)
struct record_opts *opts = &rec->opts;
int rc = 0;
- perf_evlist__config(evlist, opts);
+ perf_evlist__config(evlist, opts, &callchain_param);
evlist__for_each(evlist, pos) {
try_again:
@@ -494,6 +500,73 @@ record__finish_output(struct record *rec)
return;
}
+static int record__synthesize_workload(struct record *rec)
+{
+ struct {
+ struct thread_map map;
+ struct thread_map_data map_data;
+ } thread_map;
+
+ thread_map.map.nr = 1;
+ thread_map.map.map[0].pid = rec->evlist->workload.pid;
+ thread_map.map.map[0].comm = NULL;
+ return perf_event__synthesize_thread_map(&rec->tool, &thread_map.map,
+ process_synthesized_event,
+ &rec->session->machines.host,
+ rec->opts.sample_address,
+ rec->opts.proc_map_timeout);
+}
+
+static int record__synthesize(struct record *rec);
+
+static int
+record__switch_output(struct record *rec, bool at_exit)
+{
+ struct perf_data_file *file = &rec->file;
+ int fd, err;
+
+ /* Same size as "2015122520103046" */
+ char timestamp[] = "InvalidTimestamp";
+
+ rec->samples = 0;
+ record__finish_output(rec);
+ err = fetch_current_timestamp(timestamp, sizeof(timestamp));
+ if (err) {
+ pr_err("Failed to get current timestamp\n");
+ return -EINVAL;
+ }
+
+ fd = perf_data_file__switch(file, timestamp,
+ rec->session->header.data_offset,
+ at_exit);
+ if (fd >= 0 && !at_exit) {
+ rec->bytes_written = 0;
+ rec->session->header.data_size = 0;
+ }
+
+ if (!quiet)
+ fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
+ file->path, timestamp);
+
+ /* Output tracking events */
+ if (!at_exit) {
+ record__synthesize(rec);
+
+ /*
+ * In 'perf record --switch-output' without -a,
+ * record__synthesize() in record__switch_output() won't
+ * generate tracking events because there's no thread_map
+ * in evlist, so the newly created perf.data would not
+ * contain map and comm information.
+ * Create a fake thread_map and directly call
+ * perf_event__synthesize_thread_map() for those events.
+ */
+ if (target__none(&rec->opts.target))
+ record__synthesize_workload(rec);
+ }
+ return fd;
+}
+
static volatile int workload_exec_errno;
/*
@@ -512,6 +585,15 @@ static void workload_exec_failed_signal(int signo __maybe_unused,
static void snapshot_sig_handler(int sig);
+int __weak
+perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
+ struct perf_tool *tool __maybe_unused,
+ perf_event__handler_t process __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ return 0;
+}
+
static int record__synthesize(struct record *rec)
{
struct perf_session *session = rec->session;
@@ -549,6 +631,11 @@ static int record__synthesize(struct record *rec)
}
}
+ err = perf_event__synth_time_conv(rec->evlist->mmap[0].base, tool,
+ process_synthesized_event, machine);
+ if (err)
+ goto out;
+
if (rec->opts.full_auxtrace) {
err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
session, process_synthesized_event);
@@ -600,10 +687,16 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
- if (rec->opts.auxtrace_snapshot_mode)
+
+ if (rec->opts.auxtrace_snapshot_mode || rec->switch_output) {
signal(SIGUSR2, snapshot_sig_handler);
- else
+ if (rec->opts.auxtrace_snapshot_mode)
+ trigger_on(&auxtrace_snapshot_trigger);
+ if (rec->switch_output)
+ trigger_on(&switch_output_trigger);
+ } else {
signal(SIGUSR2, SIG_IGN);
+ }
session = perf_session__new(file, false, tool);
if (session == NULL) {
@@ -729,27 +822,45 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
perf_evlist__enable(rec->evlist);
}
- auxtrace_snapshot_enabled = 1;
+ trigger_ready(&auxtrace_snapshot_trigger);
+ trigger_ready(&switch_output_trigger);
for (;;) {
unsigned long long hits = rec->samples;
if (record__mmap_read_all(rec) < 0) {
- auxtrace_snapshot_enabled = 0;
+ trigger_error(&auxtrace_snapshot_trigger);
+ trigger_error(&switch_output_trigger);
err = -1;
goto out_child;
}
if (auxtrace_record__snapshot_started) {
auxtrace_record__snapshot_started = 0;
- if (!auxtrace_snapshot_err)
+ if (!trigger_is_error(&auxtrace_snapshot_trigger))
record__read_auxtrace_snapshot(rec);
- if (auxtrace_snapshot_err) {
+ if (trigger_is_error(&auxtrace_snapshot_trigger)) {
pr_err("AUX area tracing snapshot failed\n");
err = -1;
goto out_child;
}
}
+ if (trigger_is_hit(&switch_output_trigger)) {
+ trigger_ready(&switch_output_trigger);
+
+ if (!quiet)
+ fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
+ waking);
+ waking = 0;
+ fd = record__switch_output(rec, false);
+ if (fd < 0) {
+ pr_err("Failed to switch to new file\n");
+ trigger_error(&switch_output_trigger);
+ err = fd;
+ goto out_child;
+ }
+ }
+
if (hits == rec->samples) {
if (done || draining)
break;
@@ -772,12 +883,13 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
* disable events in this case.
*/
if (done && !disabled && !target__none(&opts->target)) {
- auxtrace_snapshot_enabled = 0;
+ trigger_off(&auxtrace_snapshot_trigger);
perf_evlist__disable(rec->evlist);
disabled = true;
}
}
- auxtrace_snapshot_enabled = 0;
+ trigger_off(&auxtrace_snapshot_trigger);
+ trigger_off(&switch_output_trigger);
if (forks && workload_exec_errno) {
char msg[STRERR_BUFSIZE];
@@ -811,11 +923,22 @@ out_child:
/* this will be recalculated during process_buildids() */
rec->samples = 0;
- if (!err)
- record__finish_output(rec);
+ if (!err) {
+ if (!rec->timestamp_filename) {
+ record__finish_output(rec);
+ } else {
+ fd = record__switch_output(rec, true);
+ if (fd < 0) {
+ status = fd;
+ goto out_delete_session;
+ }
+ }
+ }
if (!err && !quiet) {
char samples[128];
+ const char *postfix = rec->timestamp_filename ?
+ ".<timestamp>" : "";
if (rec->samples && !rec->opts.full_auxtrace)
scnprintf(samples, sizeof(samples),
@@ -823,9 +946,9 @@ out_child:
else
samples[0] = '\0';
- fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s ]\n",
+ fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
perf_data_file__size(file) / 1024.0 / 1024.0,
- file->path, samples);
+ file->path, postfix, samples);
}
out_delete_session:
@@ -833,58 +956,61 @@ out_delete_session:
return status;
}
-static void callchain_debug(void)
+static void callchain_debug(struct callchain_param *callchain)
{
static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
- pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);
+ pr_debug("callchain: type %s\n", str[callchain->record_mode]);
- if (callchain_param.record_mode == CALLCHAIN_DWARF)
+ if (callchain->record_mode == CALLCHAIN_DWARF)
pr_debug("callchain: stack dump size %d\n",
- callchain_param.dump_size);
+ callchain->dump_size);
}
-int record_parse_callchain_opt(const struct option *opt,
- const char *arg,
- int unset)
+int record_opts__parse_callchain(struct record_opts *record,
+ struct callchain_param *callchain,
+ const char *arg, bool unset)
{
int ret;
- struct record_opts *record = (struct record_opts *)opt->value;
-
- record->callgraph_set = true;
- callchain_param.enabled = !unset;
+ callchain->enabled = !unset;
/* --no-call-graph */
if (unset) {
- callchain_param.record_mode = CALLCHAIN_NONE;
+ callchain->record_mode = CALLCHAIN_NONE;
pr_debug("callchain: disabled\n");
return 0;
}
- ret = parse_callchain_record_opt(arg, &callchain_param);
+ ret = parse_callchain_record_opt(arg, callchain);
if (!ret) {
/* Enable data address sampling for DWARF unwind. */
- if (callchain_param.record_mode == CALLCHAIN_DWARF)
+ if (callchain->record_mode == CALLCHAIN_DWARF)
record->sample_address = true;
- callchain_debug();
+ callchain_debug(callchain);
}
return ret;
}
+int record_parse_callchain_opt(const struct option *opt,
+ const char *arg,
+ int unset)
+{
+ return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
+}
+
int record_callchain_opt(const struct option *opt,
const char *arg __maybe_unused,
int unset __maybe_unused)
{
- struct record_opts *record = (struct record_opts *)opt->value;
+ struct callchain_param *callchain = opt->value;
- record->callgraph_set = true;
- callchain_param.enabled = true;
+ callchain->enabled = true;
- if (callchain_param.record_mode == CALLCHAIN_NONE)
- callchain_param.record_mode = CALLCHAIN_FP;
+ if (callchain->record_mode == CALLCHAIN_NONE)
+ callchain->record_mode = CALLCHAIN_FP;
- callchain_debug();
+ callchain_debug(callchain);
return 0;
}
@@ -1122,7 +1248,7 @@ struct option __record_options[] = {
record__parse_mmap_pages),
OPT_BOOLEAN(0, "group", &record.opts.group,
"put the counters into a counter group"),
- OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
+ OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
NULL, "enables call-graph recording" ,
&record_callchain_opt),
OPT_CALLBACK(0, "call-graph", &record.opts,
@@ -1195,6 +1321,10 @@ struct option __record_options[] = {
"file", "vmlinux pathname"),
OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
"Record build-id of all DSOs regardless of hits"),
+ OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
+ "append timestamp to output filename"),
+ OPT_BOOLEAN(0, "switch-output", &record.switch_output,
+ "Switch output when receive SIGUSR2"),
OPT_END()
};
@@ -1250,6 +1380,9 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
return -EINVAL;
}
+ if (rec->switch_output)
+ rec->timestamp_filename = true;
+
if (!rec->itr) {
rec->itr = auxtrace_record__init(rec->evlist, &err);
if (err)
@@ -1261,6 +1394,14 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
if (err)
return err;
+ err = bpf__setup_stdout(rec->evlist);
+ if (err) {
+ bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
+ pr_err("ERROR: Setup BPF stdout failed: %s\n",
+ errbuf);
+ return err;
+ }
+
err = -ENOMEM;
symbol__init(NULL);
@@ -1275,8 +1416,36 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");
- if (rec->no_buildid_cache || rec->no_buildid)
+ if (rec->no_buildid_cache || rec->no_buildid) {
disable_buildid_cache();
+ } else if (rec->switch_output) {
+ /*
+ * In 'perf record --switch-output', disable buildid
+ * generation by default to reduce data file switching
+ * overhead. Still generate buildids if they are requested
+ * explicitly, using
+ *
+ * perf record --switch-output --no-no-buildid \
+ * --no-no-buildid-cache
+ *
+ * The code below is equivalent to:
+ *
+ * if ((rec->no_buildid || !rec->no_buildid_set) &&
+ * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
+ * disable_buildid_cache();
+ */
+ bool disable = true;
+
+ if (rec->no_buildid_set && !rec->no_buildid)
+ disable = false;
+ if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
+ disable = false;
+ if (disable) {
+ rec->no_buildid = true;
+ rec->no_buildid_cache = true;
+ disable_buildid_cache();
+ }
+ }
if (rec->evlist->nr_entries == 0 &&
perf_evlist__add_default(rec->evlist) < 0) {
@@ -1335,9 +1504,13 @@ out_symbol_exit:
static void snapshot_sig_handler(int sig __maybe_unused)
{
- if (!auxtrace_snapshot_enabled)
- return;
- auxtrace_snapshot_enabled = 0;
- auxtrace_snapshot_err = auxtrace_record__snapshot_start(record.itr);
- auxtrace_record__snapshot_started = 1;
+ if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
+ trigger_hit(&auxtrace_snapshot_trigger);
+ auxtrace_record__snapshot_started = 1;
+ if (auxtrace_record__snapshot_start(record.itr))
+ trigger_error(&auxtrace_snapshot_trigger);
+ }
+
+ if (trigger_is_ready(&switch_output_trigger))
+ trigger_hit(&switch_output_trigger);
}
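A usage sketch of the new output switching (PID handling is illustrative; each SIGUSR2 dumps the data collected so far into perf.data.<timestamp> and recording continues):

    perf record --switch-output -a &
    kill -USR2 $!     # rotate the output file
    kill -TERM $!     # stop and write the final file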
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 160ea23b45aa..87d40e3c4078 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -47,7 +47,6 @@ struct report {
struct perf_tool tool;
struct perf_session *session;
bool use_tui, use_gtk, use_stdio;
- bool dont_use_callchains;
bool show_full_info;
bool show_threads;
bool inverted_callchain;
@@ -235,7 +234,7 @@ static int report__setup_sample_type(struct report *rep)
sample_type |= PERF_SAMPLE_BRANCH_STACK;
if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
- if (sort__has_parent) {
+ if (perf_hpp_list.parent) {
ui__error("Selected --sort parent, but no "
"callchain data. Did you call "
"'perf record' without -g?\n");
@@ -247,7 +246,7 @@ static int report__setup_sample_type(struct report *rep)
"you call 'perf record' without -g?\n");
return -1;
}
- } else if (!rep->dont_use_callchains &&
+ } else if (!callchain_param.enabled &&
callchain_param.mode != CHAIN_NONE &&
!symbol_conf.use_callchain) {
symbol_conf.use_callchain = true;
@@ -599,13 +598,15 @@ static int __cmd_report(struct report *rep)
static int
report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
- struct report *rep = (struct report *)opt->value;
+ struct callchain_param *callchain = opt->value;
+ callchain->enabled = !unset;
/*
* --no-call-graph
*/
if (unset) {
- rep->dont_use_callchains = true;
+ symbol_conf.use_callchain = false;
+ callchain->mode = CHAIN_NONE;
return 0;
}
@@ -690,7 +691,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
.ordered_events = true,
.ordering_requires_timestamps = true,
},
- .max_stack = PERF_MAX_STACK_DEPTH,
+ .max_stack = sysctl_perf_event_max_stack,
.pretty_printing_style = "normal",
.socket_filter = -1,
};
@@ -734,7 +735,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"regex filter to identify parent, see: '--sort parent'"),
OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
"Only display entries with parent-match"),
- OPT_CALLBACK_DEFAULT('g', "call-graph", &report,
+ OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param,
"print_type,threshold[,print_limit],order,sort_key[,branch],value",
report_callchain_help, &report_parse_callchain_opt,
callchain_default_opt),
@@ -743,7 +744,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_INTEGER(0, "max-stack", &report.max_stack,
"Set the maximum stack depth when parsing the callchain, "
"anything beyond the specified depth will be ignored. "
- "Default: " __stringify(PERF_MAX_STACK_DEPTH)),
+ "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
"alias for inverted call graph"),
OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
@@ -935,7 +936,7 @@ repeat:
goto error;
}
- sort__need_collapse = true;
+ perf_hpp_list.need_collapse = true;
}
/* Force tty output for header output and per-thread stat. */
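The default callchain depth now tracks the kernel.perf_event_max_stack sysctl instead of the compile-time PERF_MAX_STACK_DEPTH (127); --max-stack still overrides it per run. A sketch, on a kernel that exposes the sysctl:

    sysctl kernel.perf_event_max_stack
    perf report --max-stack 32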
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 871b55ae22a4..afa057666c2a 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -11,6 +11,8 @@
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
+#include "util/thread_map.h"
+#include "util/color.h"
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
@@ -122,6 +124,21 @@ struct trace_sched_handler {
struct machine *machine);
};
+#define COLOR_PIDS PERF_COLOR_BLUE
+#define COLOR_CPUS PERF_COLOR_BG_RED
+
+struct perf_sched_map {
+ DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
+ int *comp_cpus;
+ bool comp;
+ struct thread_map *color_pids;
+ const char *color_pids_str;
+ struct cpu_map *color_cpus;
+ const char *color_cpus_str;
+ struct cpu_map *cpus;
+ const char *cpus_str;
+};
+
struct perf_sched {
struct perf_tool tool;
const char *sort_order;
@@ -173,6 +190,7 @@ struct perf_sched {
struct list_head sort_list, cmp_pid;
bool force;
bool skip_merge;
+ struct perf_sched_map map;
};
static u64 get_nsecs(void)
@@ -1339,6 +1357,38 @@ static int process_sched_wakeup_event(struct perf_tool *tool,
return 0;
}
+union map_priv {
+ void *ptr;
+ bool color;
+};
+
+static bool thread__has_color(struct thread *thread)
+{
+ union map_priv priv = {
+ .ptr = thread__priv(thread),
+ };
+
+ return priv.color;
+}
+
+static struct thread*
+map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
+{
+ struct thread *thread = machine__findnew_thread(machine, pid, tid);
+ union map_priv priv = {
+ .color = false,
+ };
+
+ if (!sched->map.color_pids || !thread || thread__priv(thread))
+ return thread;
+
+ if (thread_map__has(sched->map.color_pids, tid))
+ priv.color = true;
+
+ thread__set_priv(thread, priv.ptr);
+ return thread;
+}
+
static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
struct perf_sample *sample, struct machine *machine)
{
@@ -1347,13 +1397,25 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
int new_shortname;
u64 timestamp0, timestamp = sample->time;
s64 delta;
- int cpu, this_cpu = sample->cpu;
+ int i, this_cpu = sample->cpu;
+ int cpus_nr;
+ bool new_cpu = false;
+ const char *color = PERF_COLOR_NORMAL;
BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
if (this_cpu > sched->max_cpu)
sched->max_cpu = this_cpu;
+ if (sched->map.comp) {
+ cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
+ if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
+ sched->map.comp_cpus[cpus_nr++] = this_cpu;
+ new_cpu = true;
+ }
+ } else
+ cpus_nr = sched->max_cpu;
+
timestamp0 = sched->cpu_last_switched[this_cpu];
sched->cpu_last_switched[this_cpu] = timestamp;
if (timestamp0)
@@ -1366,7 +1428,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
return -1;
}
- sched_in = machine__findnew_thread(machine, -1, next_pid);
+ sched_in = map__findnew_thread(sched, machine, -1, next_pid);
if (sched_in == NULL)
return -1;
@@ -1400,26 +1462,52 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
new_shortname = 1;
}
- for (cpu = 0; cpu <= sched->max_cpu; cpu++) {
+ for (i = 0; i < cpus_nr; i++) {
+ int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
+ struct thread *curr_thread = sched->curr_thread[cpu];
+ const char *pid_color = color;
+ const char *cpu_color = color;
+
+ if (curr_thread && thread__has_color(curr_thread))
+ pid_color = COLOR_PIDS;
+
+ if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
+ continue;
+
+ if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
+ cpu_color = COLOR_CPUS;
+
if (cpu != this_cpu)
- printf(" ");
+ color_fprintf(stdout, cpu_color, " ");
else
- printf("*");
+ color_fprintf(stdout, cpu_color, "*");
if (sched->curr_thread[cpu])
- printf("%2s ", sched->curr_thread[cpu]->shortname);
+ color_fprintf(stdout, pid_color, "%2s ", sched->curr_thread[cpu]->shortname);
else
- printf(" ");
+ color_fprintf(stdout, color, " ");
}
- printf(" %12.6f secs ", (double)timestamp/1e9);
+ if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
+ goto out;
+
+ color_fprintf(stdout, color, " %12.6f secs ", (double)timestamp/1e9);
if (new_shortname) {
- printf("%s => %s:%d\n",
+ const char *pid_color = color;
+
+ if (thread__has_color(sched_in))
+ pid_color = COLOR_PIDS;
+
+ color_fprintf(stdout, pid_color, "%s => %s:%d",
sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
- } else {
- printf("\n");
}
+ if (sched->map.comp && new_cpu)
+ color_fprintf(stdout, color, " (CPU %d)", this_cpu);
+
+out:
+ color_fprintf(stdout, color, "\n");
+
thread__put(sched_in);
return 0;
@@ -1675,9 +1763,75 @@ static int perf_sched__lat(struct perf_sched *sched)
return 0;
}
+static int setup_map_cpus(struct perf_sched *sched)
+{
+ struct cpu_map *map;
+
+ sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
+
+ if (sched->map.comp) {
+ sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
+ if (!sched->map.comp_cpus)
+ return -1;
+ }
+
+ if (!sched->map.cpus_str)
+ return 0;
+
+ map = cpu_map__new(sched->map.cpus_str);
+ if (!map) {
+ pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
+ return -1;
+ }
+
+ sched->map.cpus = map;
+ return 0;
+}
+
+static int setup_color_pids(struct perf_sched *sched)
+{
+ struct thread_map *map;
+
+ if (!sched->map.color_pids_str)
+ return 0;
+
+ map = thread_map__new_by_tid_str(sched->map.color_pids_str);
+ if (!map) {
+ pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
+ return -1;
+ }
+
+ sched->map.color_pids = map;
+ return 0;
+}
+
+static int setup_color_cpus(struct perf_sched *sched)
+{
+ struct cpu_map *map;
+
+ if (!sched->map.color_cpus_str)
+ return 0;
+
+ map = cpu_map__new(sched->map.color_cpus_str);
+ if (!map) {
+ pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
+ return -1;
+ }
+
+ sched->map.color_cpus = map;
+ return 0;
+}
+
static int perf_sched__map(struct perf_sched *sched)
{
- sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
+ if (setup_map_cpus(sched))
+ return -1;
+
+ if (setup_color_pids(sched))
+ return -1;
+
+ if (setup_color_cpus(sched))
+ return -1;
setup_pager();
if (perf_sched__read_events(sched))
@@ -1831,6 +1985,17 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
"dump raw trace in ASCII"),
OPT_END()
};
+ const struct option map_options[] = {
+ OPT_BOOLEAN(0, "compact", &sched.map.comp,
+ "map output in compact mode"),
+ OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
+ "highlight given pids in map"),
+ OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
+ "highlight given CPUs in map"),
+ OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
+ "display given CPUs in map"),
+ OPT_END()
+ };
const char * const latency_usage[] = {
"perf sched latency [<options>]",
NULL
@@ -1839,6 +2004,10 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
"perf sched replay [<options>]",
NULL
};
+ const char * const map_usage[] = {
+ "perf sched map [<options>]",
+ NULL
+ };
const char *const sched_subcommands[] = { "record", "latency", "map",
"replay", "script", NULL };
const char *sched_usage[] = {
@@ -1887,6 +2056,11 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
setup_sorting(&sched, latency_options, latency_usage);
return perf_sched__lat(&sched);
} else if (!strcmp(argv[0], "map")) {
+ if (argc) {
+ argc = parse_options(argc, argv, map_options, map_usage, 0);
+ if (argc)
+ usage_with_options(map_usage, map_options);
+ }
sched.tp_handler = &map_ops;
setup_sorting(&sched, latency_options, latency_usage);
return perf_sched__map(&sched);
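A usage sketch of the new 'perf sched map' options (the PID and CPU lists are illustrative):

    perf sched record -- sleep 1
    perf sched map --compact --color-pids 1234,1235 --color-cpus 2 --cpus 0-3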
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 52826696c852..efca81679bb3 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -22,6 +22,7 @@
#include "util/thread_map.h"
#include "util/stat.h"
#include <linux/bitmap.h>
+#include <linux/stringify.h>
#include "asm/bug.h"
#include "util/mem-events.h"
@@ -317,19 +318,19 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
output[type].print_ip_opts = 0;
if (PRINT_FIELD(IP))
- output[type].print_ip_opts |= PRINT_IP_OPT_IP;
+ output[type].print_ip_opts |= EVSEL__PRINT_IP;
if (PRINT_FIELD(SYM))
- output[type].print_ip_opts |= PRINT_IP_OPT_SYM;
+ output[type].print_ip_opts |= EVSEL__PRINT_SYM;
if (PRINT_FIELD(DSO))
- output[type].print_ip_opts |= PRINT_IP_OPT_DSO;
+ output[type].print_ip_opts |= EVSEL__PRINT_DSO;
if (PRINT_FIELD(SYMOFFSET))
- output[type].print_ip_opts |= PRINT_IP_OPT_SYMOFFSET;
+ output[type].print_ip_opts |= EVSEL__PRINT_SYMOFFSET;
if (PRINT_FIELD(SRCLINE))
- output[type].print_ip_opts |= PRINT_IP_OPT_SRCLINE;
+ output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
}
/*
@@ -569,18 +570,23 @@ static void print_sample_bts(struct perf_sample *sample,
/* print branch_from information */
if (PRINT_FIELD(IP)) {
unsigned int print_opts = output[attr->type].print_ip_opts;
+ struct callchain_cursor *cursor = NULL;
- if (symbol_conf.use_callchain && sample->callchain) {
- printf("\n");
- } else {
- printf(" ");
- if (print_opts & PRINT_IP_OPT_SRCLINE) {
+ if (symbol_conf.use_callchain && sample->callchain &&
+ thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
+ sample, NULL, NULL, scripting_max_stack) == 0)
+ cursor = &callchain_cursor;
+
+ if (cursor == NULL) {
+ putchar(' ');
+ if (print_opts & EVSEL__PRINT_SRCLINE) {
print_srcline_last = true;
- print_opts &= ~PRINT_IP_OPT_SRCLINE;
+ print_opts &= ~EVSEL__PRINT_SRCLINE;
}
- }
- perf_evsel__print_ip(evsel, sample, al, print_opts,
- scripting_max_stack);
+ } else
+ putchar('\n');
+
+ sample__fprintf_sym(sample, al, 0, print_opts, cursor, stdout);
}
/* print branch_to information */
@@ -783,14 +789,15 @@ static void process_event(struct perf_script *script,
printf("%16" PRIu64, sample->weight);
if (PRINT_FIELD(IP)) {
- if (!symbol_conf.use_callchain)
- printf(" ");
- else
- printf("\n");
+ struct callchain_cursor *cursor = NULL;
+
+ if (symbol_conf.use_callchain && sample->callchain &&
+ thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
+ sample, NULL, NULL, scripting_max_stack) == 0)
+ cursor = &callchain_cursor;
- perf_evsel__print_ip(evsel, sample, al,
- output[attr->type].print_ip_opts,
- scripting_max_stack);
+ putchar(cursor ? '\n' : ' ');
+ sample__fprintf_sym(sample, al, 0, output[attr->type].print_ip_opts, cursor, stdout);
}
if (PRINT_FIELD(IREGS))
@@ -1959,6 +1966,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.attr = process_attr,
+ .event_update = perf_event__process_event_update,
.tracing_data = perf_event__process_tracing_data,
.build_id = perf_event__process_build_id,
.id_index = perf_event__process_id_index,
@@ -2020,6 +2028,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
"only consider symbols in these pids"),
OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
"only consider symbols in these tids"),
+ OPT_UINTEGER(0, "max-stack", &scripting_max_stack,
+ "Set the maximum stack depth when parsing the callchain, "
+ "anything beyond the specified depth will be ignored. "
+ "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
OPT_BOOLEAN('I', "show-info", &show_full_info,
"display extended information from perf.data file"),
OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
@@ -2055,6 +2067,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
NULL
};
+ scripting_max_stack = sysctl_perf_event_max_stack;
+
setup_scripting();
argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
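'perf script' gains the same tunable stack depth; a sketch combining it with field selection:

    perf script --max-stack 16 -F comm,ip,sym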
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 307e8a1a003c..e459b685a4e9 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -298,6 +298,14 @@ static int read_counter(struct perf_evsel *counter)
return -1;
}
}
+
+ if (verbose > 1) {
+ fprintf(stat_config.output,
+ "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
+ perf_evsel__name(counter),
+ cpu,
+ count->val, count->ena, count->run);
+ }
}
}
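With verbosity raised past one (-v -v), read_counter() now dumps each raw per-CPU reading as '<event>: <cpu>: <value> <enabled-time> <running-time>'. A sketch with made-up numbers:

    $ perf stat -v -v -a -e cycles -- sleep 1
    ...
    cycles: 0: 123456789 1000215692 1000215692
    cycles: 1: 118765432 1000203110 1000203110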
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 833214979c4f..1793da585676 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -688,7 +688,7 @@ static int hist_iter__top_callback(struct hist_entry_iter *iter,
struct hist_entry *he = iter->he;
struct perf_evsel *evsel = iter->evsel;
- if (sort__has_sym && single)
+ if (perf_hpp_list.sym && single)
perf_top__record_precise_ip(top, he, evsel->idx, al->addr);
hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
@@ -886,7 +886,7 @@ static int perf_top__start_counters(struct perf_top *top)
struct perf_evlist *evlist = top->evlist;
struct record_opts *opts = &top->record_opts;
- perf_evlist__config(evlist, opts);
+ perf_evlist__config(evlist, opts, &callchain_param);
evlist__for_each(evlist, counter) {
try_again:
@@ -917,15 +917,15 @@ out_err:
return -1;
}
-static int perf_top__setup_sample_type(struct perf_top *top __maybe_unused)
+static int callchain_param__setup_sample_type(struct callchain_param *callchain)
{
- if (!sort__has_sym) {
- if (symbol_conf.use_callchain) {
+ if (!perf_hpp_list.sym) {
+ if (callchain->enabled) {
ui__error("Selected -g but \"sym\" not present in --sort/-s.");
return -EINVAL;
}
- } else if (callchain_param.mode != CHAIN_NONE) {
- if (callchain_register_param(&callchain_param) < 0) {
+ } else if (callchain->mode != CHAIN_NONE) {
+ if (callchain_register_param(callchain) < 0) {
ui__error("Can't register callchain params.\n");
return -EINVAL;
}
@@ -952,7 +952,7 @@ static int __cmd_top(struct perf_top *top)
goto out_delete;
}
- ret = perf_top__setup_sample_type(top);
+ ret = callchain_param__setup_sample_type(&callchain_param);
if (ret)
goto out_delete;
@@ -962,7 +962,7 @@ static int __cmd_top(struct perf_top *top)
machine__synthesize_threads(&top->session->machines.host, &opts->target,
top->evlist->threads, false, opts->proc_map_timeout);
- if (sort__has_socket) {
+ if (perf_hpp_list.socket) {
ret = perf_env__read_cpu_topology_map(&perf_env);
if (ret < 0)
goto out_err_cpu_topo;
@@ -1045,18 +1045,17 @@ callchain_opt(const struct option *opt, const char *arg, int unset)
static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
- struct record_opts *record = (struct record_opts *)opt->value;
+ struct callchain_param *callchain = opt->value;
- record->callgraph_set = true;
- callchain_param.enabled = !unset;
- callchain_param.record_mode = CALLCHAIN_FP;
+ callchain->enabled = !unset;
+ callchain->record_mode = CALLCHAIN_FP;
/*
* --no-call-graph
*/
if (unset) {
symbol_conf.use_callchain = false;
- callchain_param.record_mode = CALLCHAIN_NONE;
+ callchain->record_mode = CALLCHAIN_NONE;
return 0;
}
@@ -1104,7 +1103,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
},
.proc_map_timeout = 500,
},
- .max_stack = PERF_MAX_STACK_DEPTH,
+ .max_stack = sysctl_perf_event_max_stack,
.sym_pcnt_filter = 5,
};
struct record_opts *opts = &top.record_opts;
@@ -1162,17 +1161,17 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
"output field(s): overhead, period, sample plus all of sort keys"),
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
"Show a column with the number of samples"),
- OPT_CALLBACK_NOOPT('g', NULL, &top.record_opts,
+ OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
NULL, "enables call-graph recording and display",
&callchain_opt),
- OPT_CALLBACK(0, "call-graph", &top.record_opts,
+ OPT_CALLBACK(0, "call-graph", &callchain_param,
"record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
top_callchain_help, &parse_callchain_opt),
OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
"Accumulate callchains of children and show total overhead as well"),
OPT_INTEGER(0, "max-stack", &top.max_stack,
"Set the maximum stack depth when parsing the callchain. "
- "Default: " __stringify(PERF_MAX_STACK_DEPTH)),
+ "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
"ignore callees of these functions in call graphs",
report_parse_ignore_callees_opt),
@@ -1256,7 +1255,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
sort__mode = SORT_MODE__TOP;
/* display thread wants entries to be collapsed in a different tree */
- sort__need_collapse = 1;
+ perf_hpp_list.need_collapse = 1;
if (top.use_stdio)
use_browser = 0;
@@ -1312,7 +1311,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
top.sym_evsel = perf_evlist__first(top.evlist);
- if (!symbol_conf.use_callchain) {
+ if (!callchain_param.enabled) {
symbol_conf.cumulate_callchain = false;
perf_hpp__cancel_cumulate();
}
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 93ac724fb635..6e5c325148e4 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -34,79 +34,76 @@
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
+#include "callchain.h"
+#include "syscalltbl.h"
+#include "rb_resort.h"
-#include <libaudit.h>
+#include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */
#include <stdlib.h>
-#include <sys/mman.h>
-#include <linux/futex.h>
#include <linux/err.h>
-
-/* For older distros: */
-#ifndef MAP_STACK
-# define MAP_STACK 0x20000
-#endif
-
-#ifndef MADV_HWPOISON
-# define MADV_HWPOISON 100
-
-#endif
-
-#ifndef MADV_MERGEABLE
-# define MADV_MERGEABLE 12
-#endif
-
-#ifndef MADV_UNMERGEABLE
-# define MADV_UNMERGEABLE 13
-#endif
-
-#ifndef EFD_SEMAPHORE
-# define EFD_SEMAPHORE 1
-#endif
-
-#ifndef EFD_NONBLOCK
-# define EFD_NONBLOCK 00004000
-#endif
-
-#ifndef EFD_CLOEXEC
-# define EFD_CLOEXEC 02000000
-#endif
+#include <linux/filter.h>
+#include <linux/audit.h>
+#include <sys/ptrace.h>
+#include <linux/random.h>
+#include <linux/stringify.h>
#ifndef O_CLOEXEC
# define O_CLOEXEC 02000000
#endif
-#ifndef SOCK_DCCP
-# define SOCK_DCCP 6
-#endif
-
-#ifndef SOCK_CLOEXEC
-# define SOCK_CLOEXEC 02000000
-#endif
-
-#ifndef SOCK_NONBLOCK
-# define SOCK_NONBLOCK 00004000
-#endif
-
-#ifndef MSG_CMSG_CLOEXEC
-# define MSG_CMSG_CLOEXEC 0x40000000
-#endif
-
-#ifndef PERF_FLAG_FD_NO_GROUP
-# define PERF_FLAG_FD_NO_GROUP (1UL << 0)
-#endif
-
-#ifndef PERF_FLAG_FD_OUTPUT
-# define PERF_FLAG_FD_OUTPUT (1UL << 1)
-#endif
-
-#ifndef PERF_FLAG_PID_CGROUP
-# define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
-#endif
-
-#ifndef PERF_FLAG_FD_CLOEXEC
-# define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
-#endif
-
+struct trace {
+ struct perf_tool tool;
+ struct syscalltbl *sctbl;
+ struct {
+ int max;
+ struct syscall *table;
+ struct {
+ struct perf_evsel *sys_enter,
+ *sys_exit;
+ } events;
+ } syscalls;
+ struct record_opts opts;
+ struct perf_evlist *evlist;
+ struct machine *host;
+ struct thread *current;
+ u64 base_time;
+ FILE *output;
+ unsigned long nr_events;
+ struct strlist *ev_qualifier;
+ struct {
+ size_t nr;
+ int *entries;
+ } ev_qualifier_ids;
+ struct intlist *tid_list;
+ struct intlist *pid_list;
+ struct {
+ size_t nr;
+ pid_t *entries;
+ } filter_pids;
+ double duration_filter;
+ double runtime_ms;
+ struct {
+ u64 vfs_getname,
+ proc_getname;
+ } stats;
+ unsigned int max_stack;
+ unsigned int min_stack;
+ bool not_ev_qualifier;
+ bool live;
+ bool full_time;
+ bool sched;
+ bool multiple_threads;
+ bool summary;
+ bool summary_only;
+ bool show_comm;
+ bool show_tool_stats;
+ bool trace_syscalls;
+ bool kernel_syscallchains;
+ bool force;
+ bool vfs_getname;
+ int trace_pgfaults;
+ int open_id;
+};
struct tp_field {
int offset;
@@ -371,221 +368,6 @@ static size_t syscall_arg__scnprintf_int(char *bf, size_t size,
#define SCA_INT syscall_arg__scnprintf_int
-static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
- struct syscall_arg *arg)
-{
- int printed = 0, prot = arg->val;
-
- if (prot == PROT_NONE)
- return scnprintf(bf, size, "NONE");
-#define P_MMAP_PROT(n) \
- if (prot & PROT_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
- prot &= ~PROT_##n; \
- }
-
- P_MMAP_PROT(EXEC);
- P_MMAP_PROT(READ);
- P_MMAP_PROT(WRITE);
-#ifdef PROT_SEM
- P_MMAP_PROT(SEM);
-#endif
- P_MMAP_PROT(GROWSDOWN);
- P_MMAP_PROT(GROWSUP);
-#undef P_MMAP_PROT
-
- if (prot)
- printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot);
-
- return printed;
-}
-
-#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
-
-static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
- struct syscall_arg *arg)
-{
- int printed = 0, flags = arg->val;
-
-#define P_MMAP_FLAG(n) \
- if (flags & MAP_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
- flags &= ~MAP_##n; \
- }
-
- P_MMAP_FLAG(SHARED);
- P_MMAP_FLAG(PRIVATE);
-#ifdef MAP_32BIT
- P_MMAP_FLAG(32BIT);
-#endif
- P_MMAP_FLAG(ANONYMOUS);
- P_MMAP_FLAG(DENYWRITE);
- P_MMAP_FLAG(EXECUTABLE);
- P_MMAP_FLAG(FILE);
- P_MMAP_FLAG(FIXED);
- P_MMAP_FLAG(GROWSDOWN);
-#ifdef MAP_HUGETLB
- P_MMAP_FLAG(HUGETLB);
-#endif
- P_MMAP_FLAG(LOCKED);
- P_MMAP_FLAG(NONBLOCK);
- P_MMAP_FLAG(NORESERVE);
- P_MMAP_FLAG(POPULATE);
- P_MMAP_FLAG(STACK);
-#ifdef MAP_UNINITIALIZED
- P_MMAP_FLAG(UNINITIALIZED);
-#endif
-#undef P_MMAP_FLAG
-
- if (flags)
- printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
-
- return printed;
-}
-
-#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
-
-static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
- struct syscall_arg *arg)
-{
- int printed = 0, flags = arg->val;
-
-#define P_MREMAP_FLAG(n) \
- if (flags & MREMAP_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
- flags &= ~MREMAP_##n; \
- }
-
- P_MREMAP_FLAG(MAYMOVE);
-#ifdef MREMAP_FIXED
- P_MREMAP_FLAG(FIXED);
-#endif
-#undef P_MREMAP_FLAG
-
- if (flags)
- printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
-
- return printed;
-}
-
-#define SCA_MREMAP_FLAGS syscall_arg__scnprintf_mremap_flags
-
-static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
- struct syscall_arg *arg)
-{
- int behavior = arg->val;
-
- switch (behavior) {
-#define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
- P_MADV_BHV(NORMAL);
- P_MADV_BHV(RANDOM);
- P_MADV_BHV(SEQUENTIAL);
- P_MADV_BHV(WILLNEED);
- P_MADV_BHV(DONTNEED);
- P_MADV_BHV(REMOVE);
- P_MADV_BHV(DONTFORK);
- P_MADV_BHV(DOFORK);
- P_MADV_BHV(HWPOISON);
-#ifdef MADV_SOFT_OFFLINE
- P_MADV_BHV(SOFT_OFFLINE);
-#endif
- P_MADV_BHV(MERGEABLE);
- P_MADV_BHV(UNMERGEABLE);
-#ifdef MADV_HUGEPAGE
- P_MADV_BHV(HUGEPAGE);
-#endif
-#ifdef MADV_NOHUGEPAGE
- P_MADV_BHV(NOHUGEPAGE);
-#endif
-#ifdef MADV_DONTDUMP
- P_MADV_BHV(DONTDUMP);
-#endif
-#ifdef MADV_DODUMP
- P_MADV_BHV(DODUMP);
-#endif
-#undef P_MADV_PHV
- default: break;
- }
-
- return scnprintf(bf, size, "%#x", behavior);
-}
-
-#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
-
-static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
- struct syscall_arg *arg)
-{
- int printed = 0, op = arg->val;
-
- if (op == 0)
- return scnprintf(bf, size, "NONE");
-#define P_CMD(cmd) \
- if ((op & LOCK_##cmd) == LOCK_##cmd) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #cmd); \
- op &= ~LOCK_##cmd; \
- }
-
- P_CMD(SH);
- P_CMD(EX);
- P_CMD(NB);
- P_CMD(UN);
- P_CMD(MAND);
- P_CMD(RW);
- P_CMD(READ);
- P_CMD(WRITE);
-#undef P_OP
-
- if (op)
- printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", op);
-
- return printed;
-}
-
-#define SCA_FLOCK syscall_arg__scnprintf_flock
-
-static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg)
-{
- enum syscall_futex_args {
- SCF_UADDR = (1 << 0),
- SCF_OP = (1 << 1),
- SCF_VAL = (1 << 2),
- SCF_TIMEOUT = (1 << 3),
- SCF_UADDR2 = (1 << 4),
- SCF_VAL3 = (1 << 5),
- };
- int op = arg->val;
- int cmd = op & FUTEX_CMD_MASK;
- size_t printed = 0;
-
- switch (cmd) {
-#define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
- P_FUTEX_OP(WAIT); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
- P_FUTEX_OP(WAKE); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
- P_FUTEX_OP(FD); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
- P_FUTEX_OP(REQUEUE); arg->mask |= SCF_VAL3|SCF_TIMEOUT; break;
- P_FUTEX_OP(CMP_REQUEUE); arg->mask |= SCF_TIMEOUT; break;
- P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT; break;
- P_FUTEX_OP(WAKE_OP); break;
- P_FUTEX_OP(LOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
- P_FUTEX_OP(UNLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
- P_FUTEX_OP(TRYLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
- P_FUTEX_OP(WAIT_BITSET); arg->mask |= SCF_UADDR2; break;
- P_FUTEX_OP(WAKE_BITSET); arg->mask |= SCF_UADDR2; break;
- P_FUTEX_OP(WAIT_REQUEUE_PI); break;
- default: printed = scnprintf(bf, size, "%#x", cmd); break;
- }
-
- if (op & FUTEX_PRIVATE_FLAG)
- printed += scnprintf(bf + printed, size - printed, "|PRIV");
-
- if (op & FUTEX_CLOCK_REALTIME)
- printed += scnprintf(bf + printed, size - printed, "|CLKRT");
-
- return printed;
-}
-
-#define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op
-
static const char *bpf_cmd[] = {
"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
"MAP_GET_NEXT_KEY", "PROG_LOAD",
@@ -652,110 +434,6 @@ static const char *socket_families[] = {
};
static DEFINE_STRARRAY(socket_families);
-#ifndef SOCK_TYPE_MASK
-#define SOCK_TYPE_MASK 0xf
-#endif
-
-static size_t syscall_arg__scnprintf_socket_type(char *bf, size_t size,
- struct syscall_arg *arg)
-{
- size_t printed;
- int type = arg->val,
- flags = type & ~SOCK_TYPE_MASK;
-
- type &= SOCK_TYPE_MASK;
- /*
- * Can't use a strarray, MIPS may override for ABI reasons.
- */
- switch (type) {
-#define P_SK_TYPE(n) case SOCK_##n: printed = scnprintf(bf, size, #n); break;
- P_SK_TYPE(STREAM);
- P_SK_TYPE(DGRAM);
- P_SK_TYPE(RAW);
- P_SK_TYPE(RDM);
- P_SK_TYPE(SEQPACKET);
- P_SK_TYPE(DCCP);
- P_SK_TYPE(PACKET);
-#undef P_SK_TYPE
- default:
- printed = scnprintf(bf, size, "%#x", type);
- }
-
-#define P_SK_FLAG(n) \
- if (flags & SOCK_##n) { \
- printed += scnprintf(bf + printed, size - printed, "|%s", #n); \
- flags &= ~SOCK_##n; \
- }
-
- P_SK_FLAG(CLOEXEC);
- P_SK_FLAG(NONBLOCK);
-#undef P_SK_FLAG
-
- if (flags)
- printed += scnprintf(bf + printed, size - printed, "|%#x", flags);
-
- return printed;
-}
-
-#define SCA_SK_TYPE syscall_arg__scnprintf_socket_type
-
-#ifndef MSG_PROBE
-#define MSG_PROBE 0x10
-#endif
-#ifndef MSG_WAITFORONE
-#define MSG_WAITFORONE 0x10000
-#endif
-#ifndef MSG_SENDPAGE_NOTLAST
-#define MSG_SENDPAGE_NOTLAST 0x20000
-#endif
-#ifndef MSG_FASTOPEN
-#define MSG_FASTOPEN 0x20000000
-#endif
-
-static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
- struct syscall_arg *arg)
-{
- int printed = 0, flags = arg->val;
-
- if (flags == 0)
- return scnprintf(bf, size, "NONE");
-#define P_MSG_FLAG(n) \
- if (flags & MSG_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
- flags &= ~MSG_##n; \
- }
-
- P_MSG_FLAG(OOB);
- P_MSG_FLAG(PEEK);
- P_MSG_FLAG(DONTROUTE);
- P_MSG_FLAG(TRYHARD);
- P_MSG_FLAG(CTRUNC);
- P_MSG_FLAG(PROBE);
- P_MSG_FLAG(TRUNC);
- P_MSG_FLAG(DONTWAIT);
- P_MSG_FLAG(EOR);
- P_MSG_FLAG(WAITALL);
- P_MSG_FLAG(FIN);
- P_MSG_FLAG(SYN);
- P_MSG_FLAG(CONFIRM);
- P_MSG_FLAG(RST);
- P_MSG_FLAG(ERRQUEUE);
- P_MSG_FLAG(NOSIGNAL);
- P_MSG_FLAG(MORE);
- P_MSG_FLAG(WAITFORONE);
- P_MSG_FLAG(SENDPAGE_NOTLAST);
- P_MSG_FLAG(FASTOPEN);
- P_MSG_FLAG(CMSG_CLOEXEC);
-#undef P_MSG_FLAG
-
- if (flags)
- printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
-
- return printed;
-}
-
-#define SCA_MSG_FLAGS syscall_arg__scnprintf_msg_flags
-
static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
struct syscall_arg *arg)
{
@@ -788,116 +466,6 @@ static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
#define SCA_FILENAME syscall_arg__scnprintf_filename
-static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
- struct syscall_arg *arg)
-{
- int printed = 0, flags = arg->val;
-
- if (!(flags & O_CREAT))
- arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */
-
- if (flags == 0)
- return scnprintf(bf, size, "RDONLY");
-#define P_FLAG(n) \
- if (flags & O_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
- flags &= ~O_##n; \
- }
-
- P_FLAG(APPEND);
- P_FLAG(ASYNC);
- P_FLAG(CLOEXEC);
- P_FLAG(CREAT);
- P_FLAG(DIRECT);
- P_FLAG(DIRECTORY);
- P_FLAG(EXCL);
- P_FLAG(LARGEFILE);
- P_FLAG(NOATIME);
- P_FLAG(NOCTTY);
-#ifdef O_NONBLOCK
- P_FLAG(NONBLOCK);
-#elif O_NDELAY
- P_FLAG(NDELAY);
-#endif
-#ifdef O_PATH
- P_FLAG(PATH);
-#endif
- P_FLAG(RDWR);
-#ifdef O_DSYNC
- if ((flags & O_SYNC) == O_SYNC)
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC");
- else {
- P_FLAG(DSYNC);
- }
-#else
- P_FLAG(SYNC);
-#endif
- P_FLAG(TRUNC);
- P_FLAG(WRONLY);
-#undef P_FLAG
-
- if (flags)
- printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
-
- return printed;
-}
-
-#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
-
-static size_t syscall_arg__scnprintf_perf_flags(char *bf, size_t size,
- struct syscall_arg *arg)
-{
- int printed = 0, flags = arg->val;
-
- if (flags == 0)
- return 0;
-
-#define P_FLAG(n) \
- if (flags & PERF_FLAG_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
- flags &= ~PERF_FLAG_##n; \
- }
-
- P_FLAG(FD_NO_GROUP);
- P_FLAG(FD_OUTPUT);
- P_FLAG(PID_CGROUP);
- P_FLAG(FD_CLOEXEC);
-#undef P_FLAG
-
- if (flags)
- printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
-
- return printed;
-}
-
-#define SCA_PERF_FLAGS syscall_arg__scnprintf_perf_flags
-
-static size_t syscall_arg__scnprintf_eventfd_flags(char *bf, size_t size,
- struct syscall_arg *arg)
-{
- int printed = 0, flags = arg->val;
-
- if (flags == 0)
- return scnprintf(bf, size, "NONE");
-#define P_FLAG(n) \
- if (flags & EFD_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
- flags &= ~EFD_##n; \
- }
-
- P_FLAG(SEMAPHORE);
- P_FLAG(CLOEXEC);
- P_FLAG(NONBLOCK);
-#undef P_FLAG
-
- if (flags)
- printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
-
- return printed;
-}
-
-#define SCA_EFD_FLAGS syscall_arg__scnprintf_eventfd_flags
-
static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
@@ -921,59 +489,6 @@ static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
-static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg)
-{
- int sig = arg->val;
-
- switch (sig) {
-#define P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, #n)
- P_SIGNUM(HUP);
- P_SIGNUM(INT);
- P_SIGNUM(QUIT);
- P_SIGNUM(ILL);
- P_SIGNUM(TRAP);
- P_SIGNUM(ABRT);
- P_SIGNUM(BUS);
- P_SIGNUM(FPE);
- P_SIGNUM(KILL);
- P_SIGNUM(USR1);
- P_SIGNUM(SEGV);
- P_SIGNUM(USR2);
- P_SIGNUM(PIPE);
- P_SIGNUM(ALRM);
- P_SIGNUM(TERM);
- P_SIGNUM(CHLD);
- P_SIGNUM(CONT);
- P_SIGNUM(STOP);
- P_SIGNUM(TSTP);
- P_SIGNUM(TTIN);
- P_SIGNUM(TTOU);
- P_SIGNUM(URG);
- P_SIGNUM(XCPU);
- P_SIGNUM(XFSZ);
- P_SIGNUM(VTALRM);
- P_SIGNUM(PROF);
- P_SIGNUM(WINCH);
- P_SIGNUM(IO);
- P_SIGNUM(PWR);
- P_SIGNUM(SYS);
-#ifdef SIGEMT
- P_SIGNUM(EMT);
-#endif
-#ifdef SIGSTKFLT
- P_SIGNUM(STKFLT);
-#endif
-#ifdef SIGSWI
- P_SIGNUM(SWI);
-#endif
- default: break;
- }
-
- return scnprintf(bf, size, "%#x", sig);
-}
-
-#define SCA_SIGNUM syscall_arg__scnprintf_signum
-
#if defined(__i386__) || defined(__x86_64__)
/*
* FIXME: Make this available to all arches.
@@ -1001,16 +516,62 @@ static const char *tioctls[] = {
static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
#endif /* defined(__i386__) || defined(__x86_64__) */
+#ifndef GRND_NONBLOCK
+#define GRND_NONBLOCK 0x0001
+#endif
+#ifndef GRND_RANDOM
+#define GRND_RANDOM 0x0002
+#endif
+
+static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+#define P_FLAG(n) \
+ if (flags & GRND_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~GRND_##n; \
+ }
+
+ P_FLAG(RANDOM);
+ P_FLAG(NONBLOCK);
+#undef P_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
+
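The getrandom printer above is the template all of the SCA_* "beautifiers" in this file follow: print the flag bits it knows by name, clear them, then dump any leftover bits in hex so nothing is silently lost. A minimal standalone sketch of that pattern, using plain snprintf() instead of perf's scnprintf() and nothing from the perf tree (illustrative only, not part of the patch):

#include <stdio.h>
#include <stddef.h>

#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK 0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM 0x0002
#endif

static int getrandom_flags_str(char *bf, size_t size, unsigned int flags)
{
	int printed = 0;

#define P_FLAG(n)							\
	if (flags & GRND_##n) {						\
		printed += snprintf(bf + printed, size - printed,	\
				    "%s%s", printed ? "|" : "", #n);	\
		flags &= ~GRND_##n;					\
	}
	P_FLAG(RANDOM);
	P_FLAG(NONBLOCK);
#undef P_FLAG
	/* Unknown bits stay visible, just not symbolic. */
	if (flags)
		printed += snprintf(bf + printed, size - printed,
				    "%s%#x", printed ? "|" : "", flags);
	return printed;
}

int main(void)
{
	char bf[64];

	getrandom_flags_str(bf, sizeof(bf), GRND_RANDOM | GRND_NONBLOCK | 0x8);
	puts(bf);	/* prints: RANDOM|NONBLOCK|0x8 */
	return 0;
}
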
#define STRARRAY(arg, name, array) \
.arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
.arg_parm = { [arg] = &strarray__##array, }
+#include "trace/beauty/eventfd.c"
+#include "trace/beauty/flock.c"
+#include "trace/beauty/futex_op.c"
+#include "trace/beauty/mmap.c"
+#include "trace/beauty/mode_t.c"
+#include "trace/beauty/msg_flags.c"
+#include "trace/beauty/open_flags.c"
+#include "trace/beauty/perf_event_open.c"
+#include "trace/beauty/pid.c"
+#include "trace/beauty/sched_policy.c"
+#include "trace/beauty/seccomp.c"
+#include "trace/beauty/signum.c"
+#include "trace/beauty/socket_type.c"
+#include "trace/beauty/waitid_options.c"
+
static struct syscall_fmt {
const char *name;
const char *alias;
size_t (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg);
void *arg_parm[6];
bool errmsg;
+ bool errpid;
bool timeout;
bool hexret;
} syscall_fmts[] = {
@@ -1028,6 +589,7 @@ static struct syscall_fmt {
{ .name = "chroot", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
{ .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
+ { .name = "clone", .errpid = true, },
{ .name = "close", .errmsg = true,
.arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
{ .name = "connect", .errmsg = true, },
@@ -1093,6 +655,11 @@ static struct syscall_fmt {
{ .name = "getdents64", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
+ { .name = "getpid", .errpid = true, },
+ { .name = "getpgid", .errpid = true, },
+ { .name = "getppid", .errpid = true, },
+ { .name = "getrandom", .errmsg = true,
+ .arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, },
{ .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
{ .name = "getxattr", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
@@ -1186,8 +753,7 @@ static struct syscall_fmt {
[1] = SCA_FILENAME, /* filename */
[2] = SCA_OPEN_FLAGS, /* flags */ }, },
{ .name = "perf_event_open", .errmsg = true,
- .arg_scnprintf = { [1] = SCA_INT, /* pid */
- [2] = SCA_INT, /* cpu */
+ .arg_scnprintf = { [2] = SCA_INT, /* cpu */
[3] = SCA_FD, /* group_fd */
[4] = SCA_PERF_FLAGS, /* flags */ }, },
{ .name = "pipe2", .errmsg = true,
@@ -1234,6 +800,11 @@ static struct syscall_fmt {
.arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
{ .name = "rt_tgsigqueueinfo", .errmsg = true,
.arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
+ { .name = "sched_setscheduler", .errmsg = true,
+ .arg_scnprintf = { [1] = SCA_SCHED_POLICY, /* policy */ }, },
+ { .name = "seccomp", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_SECCOMP_OP, /* op */
+ [1] = SCA_SECCOMP_FLAGS, /* flags */ }, },
{ .name = "select", .errmsg = true, .timeout = true, },
{ .name = "sendmmsg", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */
@@ -1244,7 +815,9 @@ static struct syscall_fmt {
{ .name = "sendto", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */
[3] = SCA_MSG_FLAGS, /* flags */ }, },
+ { .name = "set_tid_address", .errpid = true, },
{ .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
+ { .name = "setpgid", .errmsg = true, },
{ .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
{ .name = "setxattr", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
@@ -1287,6 +860,10 @@ static struct syscall_fmt {
.arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
{ .name = "vmsplice", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "wait4", .errpid = true,
+ .arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, },
+ { .name = "waitid", .errpid = true,
+ .arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, },
{ .name = "write", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "writev", .errmsg = true,
@@ -1398,59 +975,6 @@ fail:
static const size_t trace__entry_str_size = 2048;
-struct trace {
- struct perf_tool tool;
- struct {
- int machine;
- int open_id;
- } audit;
- struct {
- int max;
- struct syscall *table;
- struct {
- struct perf_evsel *sys_enter,
- *sys_exit;
- } events;
- } syscalls;
- struct record_opts opts;
- struct perf_evlist *evlist;
- struct machine *host;
- struct thread *current;
- u64 base_time;
- FILE *output;
- unsigned long nr_events;
- struct strlist *ev_qualifier;
- struct {
- size_t nr;
- int *entries;
- } ev_qualifier_ids;
- struct intlist *tid_list;
- struct intlist *pid_list;
- struct {
- size_t nr;
- pid_t *entries;
- } filter_pids;
- double duration_filter;
- double runtime_ms;
- struct {
- u64 vfs_getname,
- proc_getname;
- } stats;
- bool not_ev_qualifier;
- bool live;
- bool full_time;
- bool sched;
- bool multiple_threads;
- bool summary;
- bool summary_only;
- bool show_comm;
- bool show_tool_stats;
- bool trace_syscalls;
- bool force;
- bool vfs_getname;
- int trace_pgfaults;
-};
-
static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
struct thread_trace *ttrace = thread__priv(thread);
@@ -1618,6 +1142,7 @@ static int trace__process_event(struct trace *trace, struct machine *machine,
color_fprintf(trace->output, PERF_COLOR_RED,
"LOST %" PRIu64 " events!\n", event->lost.lost);
ret = machine__process_lost_event(machine, event, sample);
+ break;
default:
ret = machine__process_event(machine, event, sample);
break;
@@ -1675,6 +1200,10 @@ static int syscall__set_arg_fmts(struct syscall *sc)
sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
else if (field->flags & FIELD_IS_POINTER)
sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
+ else if (strcmp(field->type, "pid_t") == 0)
+ sc->arg_scnprintf[idx] = SCA_PID;
+ else if (strcmp(field->type, "umode_t") == 0)
+ sc->arg_scnprintf[idx] = SCA_MODE_T;
++idx;
}
@@ -1685,7 +1214,7 @@ static int trace__read_syscall_info(struct trace *trace, int id)
{
char tp_name[128];
struct syscall *sc;
- const char *name = audit_syscall_to_name(id, trace->audit.machine);
+ const char *name = syscalltbl__name(trace->sctbl, id);
if (name == NULL)
return -1;
@@ -1760,7 +1289,7 @@ static int trace__validate_ev_qualifier(struct trace *trace)
strlist__for_each(pos, trace->ev_qualifier) {
const char *sc = pos->s;
- int id = audit_name_to_syscall(sc, trace->audit.machine);
+ int id = syscalltbl__id(trace->sctbl, sc);
if (id < 0) {
if (err == 0) {
@@ -1846,7 +1375,12 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
"%ld", val);
}
}
- } else {
+ } else if (IS_ERR(sc->tp_format)) {
+ /*
+ * If we managed to read the tracepoint /format file, then we
+ * may end up not having any args, like with gettid(), so only
+ * print the raw args when we didn't manage to read it.
+ */
int i = 0;
while (i < 6) {
@@ -1987,7 +1521,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
goto out_put;
}
- if (!trace->summary_only)
+ if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
trace__printf_interrupted_entry(trace, sample);
ttrace->entry_time = sample->time;
@@ -1998,7 +1532,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
args, trace, thread);
if (sc->is_exit) {
- if (!trace->duration_filter && !trace->summary_only) {
+ if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
fprintf(trace->output, "%-70s\n", ttrace->entry_str);
}
@@ -2018,6 +1552,29 @@ out_put:
return err;
}
+static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct callchain_cursor *cursor)
+{
+ struct addr_location al;
+
+ if (machine__resolve(trace->host, &al, sample) < 0 ||
+ thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, trace->max_stack))
+ return -1;
+
+ return 0;
+}
+
+static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
+{
+ /* TODO: user-configurable print_opts */
+ const unsigned int print_opts = EVSEL__PRINT_SYM |
+ EVSEL__PRINT_DSO |
+ EVSEL__PRINT_UNKNOWN_AS_ADDR;
+
+ return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
+}
+
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -2025,7 +1582,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
long ret;
u64 duration = 0;
struct thread *thread;
- int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
+ int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
struct thread_trace *ttrace;
@@ -2042,7 +1599,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
- if (id == trace->audit.open_id && ret >= 0 && ttrace->filename.pending_open) {
+ if (id == trace->open_id && ret >= 0 && ttrace->filename.pending_open) {
trace__set_fd_pathname(thread, ret, ttrace->filename.name);
ttrace->filename.pending_open = false;
++trace->stats.vfs_getname;
@@ -2057,6 +1614,15 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
} else if (trace->duration_filter)
goto out;
+ if (sample->callchain) {
+ callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
+ if (callchain_ret == 0) {
+ if (callchain_cursor.nr < trace->min_stack)
+ goto out;
+ callchain_ret = 1;
+ }
+ }
+
if (trace->summary_only)
goto out;
@@ -2073,7 +1639,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
if (sc->fmt == NULL) {
signed_print:
fprintf(trace->output, ") = %ld", ret);
- } else if (ret < 0 && sc->fmt->errmsg) {
+ } else if (ret < 0 && (sc->fmt->errmsg || sc->fmt->errpid)) {
char bf[STRERR_BUFSIZE];
const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
*e = audit_errno_to_name(-ret);
@@ -2083,10 +1649,24 @@ signed_print:
fprintf(trace->output, ") = 0 Timeout");
else if (sc->fmt->hexret)
fprintf(trace->output, ") = %#lx", ret);
- else
+ else if (sc->fmt->errpid) {
+ struct thread *child = machine__find_thread(trace->host, ret, ret);
+
+ if (child != NULL) {
+ fprintf(trace->output, ") = %ld", ret);
+ if (child->comm_set)
+ fprintf(trace->output, " (%s)", thread__comm_str(child));
+ thread__put(child);
+ }
+ } else
goto signed_print;
fputc('\n', trace->output);
+
+ if (callchain_ret > 0)
+ trace__fprintf_callchain(trace, sample);
+ else if (callchain_ret < 0)
+ pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
ttrace->entry_pending = false;
err = 0;
@@ -2217,6 +1797,17 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
{
+ int callchain_ret = 0;
+
+ if (sample->callchain) {
+ callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
+ if (callchain_ret == 0) {
+ if (callchain_cursor.nr < trace->min_stack)
+ goto out;
+ callchain_ret = 1;
+ }
+ }
+
trace__printf_interrupted_entry(trace, sample);
trace__fprintf_tstamp(trace, sample->time, trace->output);
@@ -2234,6 +1825,12 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
}
fprintf(trace->output, ")\n");
+
+ if (callchain_ret > 0)
+ trace__fprintf_callchain(trace, sample);
+ else if (callchain_ret < 0)
+ pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
+out:
return 0;
}
@@ -2264,8 +1861,19 @@ static int trace__pgfault(struct trace *trace,
char map_type = 'd';
struct thread_trace *ttrace;
int err = -1;
+ int callchain_ret = 0;
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
+
+ if (sample->callchain) {
+ callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
+ if (callchain_ret == 0) {
+ if (callchain_cursor.nr < trace->min_stack)
+ goto out_put;
+ callchain_ret = 1;
+ }
+ }
+
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
goto out_put;
@@ -2307,6 +1915,11 @@ static int trace__pgfault(struct trace *trace,
print_location(trace->output, sample, &al, true, false);
fprintf(trace->output, " (%c%c)\n", map_type, al.level);
+
+ if (callchain_ret > 0)
+ trace__fprintf_callchain(trace, sample);
+ else if (callchain_ret < 0)
+ pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
err = 0;
out_put:
@@ -2326,6 +1939,23 @@ static bool skip_sample(struct trace *trace, struct perf_sample *sample)
return false;
}
+static void trace__set_base_time(struct trace *trace,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample)
+{
+ /*
+ * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
+ * and don't use sample->time unconditionally, we may end up having
+ * some other event in the future without PERF_SAMPLE_TIME for good
+ * reason, i.e. we may not be interested in its timestamps, just in
+ * it taking place, picking some piece of information when it
+ * appears in our event stream (vfs_getname comes to mind).
+ */
+ if (trace->base_time == 0 && !trace->full_time &&
+ (evsel->attr.sample_type & PERF_SAMPLE_TIME))
+ trace->base_time = sample->time;
+}
+
static int trace__process_sample(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -2340,8 +1970,7 @@ static int trace__process_sample(struct perf_tool *tool,
if (skip_sample(trace, sample))
return 0;
- if (!trace->full_time && trace->base_time == 0)
- trace->base_time = sample->time;
+ trace__set_base_time(trace, evsel, sample);
if (handler) {
++trace->nr_events;
@@ -2450,8 +2079,7 @@ static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
return true;
}
-static int perf_evlist__add_pgfault(struct perf_evlist *evlist,
- u64 config)
+static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
{
struct perf_evsel *evsel;
struct perf_event_attr attr = {
@@ -2465,13 +2093,10 @@ static int perf_evlist__add_pgfault(struct perf_evlist *evlist,
event_attr_init(&attr);
evsel = perf_evsel__new(&attr);
- if (!evsel)
- return -ENOMEM;
-
- evsel->handler = trace__pgfault;
- perf_evlist__add(evlist, evsel);
+ if (evsel)
+ evsel->handler = trace__pgfault;
- return 0;
+ return evsel;
}
static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
@@ -2479,9 +2104,6 @@ static void trace__handle_event(struct trace *trace, union perf_event *event, st
const u32 type = event->header.type;
struct perf_evsel *evsel;
- if (!trace->full_time && trace->base_time == 0)
- trace->base_time = sample->time;
-
if (type != PERF_RECORD_SAMPLE) {
trace__process_event(trace, trace->host, event, sample);
return;
@@ -2493,6 +2115,8 @@ static void trace__handle_event(struct trace *trace, union perf_event *event, st
return;
}
+ trace__set_base_time(trace, evsel, sample);
+
if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
sample->raw_data == NULL) {
fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
@@ -2527,6 +2151,15 @@ static int trace__add_syscall_newtp(struct trace *trace)
perf_evlist__add(evlist, sys_enter);
perf_evlist__add(evlist, sys_exit);
+ if (callchain_param.enabled && !trace->kernel_syscallchains) {
+ /*
+ * We're interested only in the user space callchain
+ * leading to the syscall, allow overriding that for
+		 * debugging reasons using --kernel-syscall-graph
+ */
+ sys_exit->attr.exclude_callchain_kernel = 1;
+ }
+
trace->syscalls.events.sys_enter = sys_enter;
trace->syscalls.events.sys_exit = sys_exit;
@@ -2565,7 +2198,7 @@ out_enomem:
static int trace__run(struct trace *trace, int argc, const char **argv)
{
struct perf_evlist *evlist = trace->evlist;
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
int err = -1, i;
unsigned long before;
const bool forks = argc > 0;
@@ -2579,14 +2212,19 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (trace->trace_syscalls)
trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);
- if ((trace->trace_pgfaults & TRACE_PFMAJ) &&
- perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) {
- goto out_error_mem;
+ if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
+ pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
+ if (pgfault_maj == NULL)
+ goto out_error_mem;
+ perf_evlist__add(evlist, pgfault_maj);
}
- if ((trace->trace_pgfaults & TRACE_PFMIN) &&
- perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN))
- goto out_error_mem;
+ if ((trace->trace_pgfaults & TRACE_PFMIN)) {
+ pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
+ if (pgfault_min == NULL)
+ goto out_error_mem;
+ perf_evlist__add(evlist, pgfault_min);
+ }
if (trace->sched &&
perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
@@ -2605,7 +2243,45 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
goto out_delete_evlist;
}
- perf_evlist__config(evlist, &trace->opts);
+ perf_evlist__config(evlist, &trace->opts, NULL);
+
+ if (callchain_param.enabled) {
+ bool use_identifier = false;
+
+ if (trace->syscalls.events.sys_exit) {
+ perf_evsel__config_callchain(trace->syscalls.events.sys_exit,
+ &trace->opts, &callchain_param);
+ use_identifier = true;
+ }
+
+ if (pgfault_maj) {
+ perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
+ use_identifier = true;
+ }
+
+ if (pgfault_min) {
+ perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
+ use_identifier = true;
+ }
+
+ if (use_identifier) {
+ /*
+ * Now we have evsels with different sample_ids, use
+ * PERF_SAMPLE_IDENTIFIER to map from sample to evsel
+ * from a fixed position in each ring buffer record.
+ *
+			 * As of the changeset introducing this comment, this
+ * isn't strictly needed, as the fields that can come before
+ * PERF_SAMPLE_ID are all used, but we'll probably disable
+ * some of those for things like copying the payload of
+ * pointer syscall arguments, and for vfs_getname we don't
+ * need PERF_SAMPLE_ADDR and PERF_SAMPLE_IP, so do this
+ * here as a warning we need to use PERF_SAMPLE_IDENTIFIER.
+ */
+ perf_evlist__set_sample_bit(evlist, IDENTIFIER);
+ perf_evlist__reset_sample_bit(evlist, ID);
+ }
+ }
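
The comment above is why PERF_SAMPLE_IDENTIFIER gets switched on once sys_exit and the page-fault events stop sharing one sample_type: the identifier sits at a fixed offset in every record, so the reader never has to know which other sample fields a given event enabled. A hedged sketch of that property, not the evlist lookup API itself:

#include <stdint.h>
#include <string.h>

/*
 * With PERF_SAMPLE_IDENTIFIER set, the id is the first u64 of a
 * PERF_RECORD_SAMPLE body (and the trailing u64 of most other record
 * types), so the originating event can be identified from a fixed
 * position in the ring buffer record.
 */
static uint64_t sample_id_from_body(const void *sample_body)
{
	uint64_t id;

	memcpy(&id, sample_body, sizeof(id)); /* avoid alignment assumptions */
	return id;
}
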
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
@@ -2883,15 +2559,29 @@ static size_t trace__fprintf_threads_header(FILE *fp)
return printed;
}
+DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
+ struct stats *stats;
+ double msecs;
+ int syscall;
+)
+{
+ struct int_node *source = rb_entry(nd, struct int_node, rb_node);
+ struct stats *stats = source->priv;
+
+ entry->syscall = source->i;
+ entry->stats = stats;
+ entry->msecs = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
+}
+
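
DEFINE_RESORT_RB() builds a temporarily resorted rb-tree view of the per-thread syscall_stats intlist, keyed on total time spent per syscall. The key itself is just "number of calls times mean duration" converted to milliseconds; a small standalone sketch of that arithmetic, with struct stats/avg_stats() replaced by plain parameters (illustration only):

#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL

/* entry->msecs above: total per-syscall time, nanoseconds folded to msec. */
static double syscall_total_msecs(uint64_t n_calls, double mean_nsecs)
{
	return (double)n_calls * (mean_nsecs / NSEC_PER_MSEC);
}
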
static size_t thread__dump_stats(struct thread_trace *ttrace,
struct trace *trace, FILE *fp)
{
- struct stats *stats;
size_t printed = 0;
struct syscall *sc;
- struct int_node *inode = intlist__first(ttrace->syscall_stats);
+ struct rb_node *nd;
+ DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
- if (inode == NULL)
+ if (syscall_stats == NULL)
return 0;
printed += fprintf(fp, "\n");
@@ -2900,9 +2590,8 @@ static size_t thread__dump_stats(struct thread_trace *ttrace,
printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
printed += fprintf(fp, " --------------- -------- --------- --------- --------- --------- ------\n");
- /* each int_node is a syscall */
- while (inode) {
- stats = inode->priv;
+ resort_rb__for_each(nd, syscall_stats) {
+ struct stats *stats = syscall_stats_entry->stats;
if (stats) {
double min = (double)(stats->min) / NSEC_PER_MSEC;
double max = (double)(stats->max) / NSEC_PER_MSEC;
@@ -2913,34 +2602,23 @@ static size_t thread__dump_stats(struct thread_trace *ttrace,
pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
avg /= NSEC_PER_MSEC;
- sc = &trace->syscalls.table[inode->i];
+ sc = &trace->syscalls.table[syscall_stats_entry->syscall];
printed += fprintf(fp, " %-15s", sc->name);
printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
- n, avg * n, min, avg);
+ n, syscall_stats_entry->msecs, min, avg);
printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
}
-
- inode = intlist__next(inode);
}
+ resort_rb__delete(syscall_stats);
printed += fprintf(fp, "\n\n");
return printed;
}
-/* struct used to pass data to per-thread function */
-struct summary_data {
- FILE *fp;
- struct trace *trace;
- size_t printed;
-};
-
-static int trace__fprintf_one_thread(struct thread *thread, void *priv)
+static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
{
- struct summary_data *data = priv;
- FILE *fp = data->fp;
- size_t printed = data->printed;
- struct trace *trace = data->trace;
+ size_t printed = 0;
struct thread_trace *ttrace = thread__priv(thread);
double ratio;
@@ -2956,25 +2634,45 @@ static int trace__fprintf_one_thread(struct thread *thread, void *priv)
printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
if (ttrace->pfmin)
printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
- printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
+ if (trace->sched)
+ printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
+ else if (fputc('\n', fp) != EOF)
+ ++printed;
+
printed += thread__dump_stats(ttrace, trace, fp);
- data->printed += printed;
+ return printed;
+}
- return 0;
+static unsigned long thread__nr_events(struct thread_trace *ttrace)
+{
+ return ttrace ? ttrace->nr_events : 0;
+}
+
+DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
+ struct thread *thread;
+)
+{
+ entry->thread = rb_entry(nd, struct thread, rb_node);
+}
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
- struct summary_data data = {
- .fp = fp,
- .trace = trace
- };
- data.printed = trace__fprintf_threads_header(fp);
+ DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host);
+ size_t printed = trace__fprintf_threads_header(fp);
+ struct rb_node *nd;
+
+ if (threads == NULL) {
+ fprintf(fp, "%s", "Error sorting output by nr_events!\n");
+ return 0;
+ }
- machine__for_each_thread(trace->host, trace__fprintf_one_thread, &data);
+ resort_rb__for_each(nd, threads)
+ printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
- return data.printed;
+ resort_rb__delete(threads);
+
+ return printed;
}
static int trace__set_duration(const struct option *opt, const char *str,
@@ -3070,10 +2768,6 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
NULL
};
struct trace trace = {
- .audit = {
- .machine = audit_detect_machine(),
- .open_id = audit_name_to_syscall("open", trace.audit.machine),
- },
.syscalls = {
. max = -1,
},
@@ -3091,6 +2785,8 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
.output = stderr,
.show_comm = true,
.trace_syscalls = true,
+ .kernel_syscallchains = false,
+ .max_stack = UINT_MAX,
};
const char *output_name = NULL;
const char *ev_qualifier_str = NULL;
@@ -3136,10 +2832,24 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
"Trace pagefaults", parse_pagefaults, "maj"),
OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
+ OPT_CALLBACK(0, "call-graph", &trace.opts,
+ "record_mode[,record_size]", record_callchain_help,
+ &record_parse_callchain_opt),
+ OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
+ "Show the kernel callchains on the syscall exit path"),
+ OPT_UINTEGER(0, "min-stack", &trace.min_stack,
+ "Set the minimum stack depth when parsing the callchain, "
+ "anything below the specified depth will be ignored."),
+ OPT_UINTEGER(0, "max-stack", &trace.max_stack,
+ "Set the maximum stack depth when parsing the callchain, "
+ "anything beyond the specified depth will be ignored. "
+ "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_END()
};
+ bool __maybe_unused max_stack_user_set = true;
+ bool mmap_pages_user_set = true;
const char * const trace_subcommands[] = { "record", NULL };
int err;
char bf[BUFSIZ];
@@ -3148,8 +2858,9 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
signal(SIGFPE, sighandler_dump_stack);
trace.evlist = perf_evlist__new();
+ trace.sctbl = syscalltbl__new();
- if (trace.evlist == NULL) {
+ if (trace.evlist == NULL || trace.sctbl == NULL) {
pr_err("Not enough memory to run!\n");
err = -ENOMEM;
goto out;
@@ -3158,11 +2869,40 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
+ err = bpf__setup_stdout(trace.evlist);
+ if (err) {
+ bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
+ pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
+ goto out;
+ }
+
+ err = -1;
+
if (trace.trace_pgfaults) {
trace.opts.sample_address = true;
trace.opts.sample_time = true;
}
+ if (trace.opts.mmap_pages == UINT_MAX)
+ mmap_pages_user_set = false;
+
+ if (trace.max_stack == UINT_MAX) {
+ trace.max_stack = sysctl_perf_event_max_stack;
+ max_stack_user_set = false;
+ }
+
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+ if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled)
+ record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
+#endif
+
+ if (callchain_param.enabled) {
+ if (!mmap_pages_user_set && geteuid() == 0)
+ trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
+
+ symbol_conf.use_callchain = true;
+ }
+
if (trace.evlist->nr_entries > 0)
evlist__set_evsel_handler(trace.evlist, trace__event_handler);
@@ -3179,6 +2919,11 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
return -1;
}
+ if (!trace.trace_syscalls && ev_qualifier_str) {
+ pr_err("The -e option can't be used with --no-syscalls.\n");
+ goto out;
+ }
+
if (output_name != NULL) {
err = trace__open_output(&trace, output_name);
if (err < 0) {
@@ -3187,6 +2932,8 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
}
}
+ trace.open_id = syscalltbl__id(trace.sctbl, "open");
+
if (ev_qualifier_str != NULL) {
const char *s = ev_qualifier_str;
struct strlist_config slist_config = {
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 6f8f6430f2bf..1e46277286c2 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -27,7 +27,7 @@ NO_PERF_REGS := 1
ifeq ($(ARCH),x86)
$(call detected,CONFIG_X86)
ifeq (${IS_64_BIT}, 1)
- CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
+ CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
$(call detected,CONFIG_X86_64)
@@ -295,9 +295,6 @@ ifndef NO_LIBELF
CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT
endif
- # include ARCH specific config
- -include $(src-perf)/arch/$(ARCH)/Makefile
-
ifndef NO_DWARF
ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index 6461e02ab940..3573f315f955 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -92,6 +92,22 @@ error:
return ret;
}
+static int use_arch_timestamp;
+
+static inline uint64_t
+get_arch_timestamp(void)
+{
+#if defined(__i386__) || defined(__x86_64__)
+ unsigned int low, high;
+
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+ return low | ((uint64_t)high) << 32;
+#else
+ return 0;
+#endif
+}
+
#define NSEC_PER_SEC 1000000000
static int perf_clk_id = CLOCK_MONOTONIC;
@@ -107,6 +123,9 @@ perf_get_timestamp(void)
struct timespec ts;
int ret;
+ if (use_arch_timestamp)
+ return get_arch_timestamp();
+
ret = clock_gettime(perf_clk_id, &ts);
if (ret)
return 0;
@@ -203,6 +222,17 @@ perf_close_marker_file(void)
munmap(marker_addr, pgsz);
}
+static void
+init_arch_timestamp(void)
+{
+ char *str = getenv("JITDUMP_USE_ARCH_TIMESTAMP");
+
+ if (!str || !*str || !strcmp(str, "0"))
+ return;
+
+ use_arch_timestamp = 1;
+}
+
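
The rdtsc path above is only used when JITDUMP_USE_ARCH_TIMESTAMP is set in the environment (for example to "1" when launching the JVM); otherwise perf_get_timestamp() keeps using clock_gettime(perf_clk_id, ...). A sketch of what that fallback amounts to, folding the timespec into a single nanosecond value (illustrative, not the agent code verbatim):

#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t timespec_to_ns(const struct timespec *ts)
{
	/* Same fold perf_get_timestamp() applies to the clock_gettime() result. */
	return (uint64_t)ts->tv_sec * NSEC_PER_SEC + (uint64_t)ts->tv_nsec;
}
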
void *jvmti_open(void)
{
int pad_cnt;
@@ -211,11 +241,17 @@ void *jvmti_open(void)
int fd;
FILE *fp;
+ init_arch_timestamp();
+
/*
* check if clockid is supported
*/
- if (!perf_get_timestamp())
- warnx("jvmti: kernel does not support %d clock id", perf_clk_id);
+ if (!perf_get_timestamp()) {
+ if (use_arch_timestamp)
+ warnx("jvmti: arch timestamp not supported");
+ else
+ warnx("jvmti: kernel does not support %d clock id", perf_clk_id);
+ }
memset(&header, 0, sizeof(header));
@@ -263,6 +299,9 @@ void *jvmti_open(void)
header.timestamp = perf_get_timestamp();
+ if (use_arch_timestamp)
+ header.flags |= JITDUMP_FLAGS_ARCH_TIMESTAMP;
+
if (!fwrite(&header, sizeof(header), 1, fp)) {
warn("jvmti: cannot write dumpfile header");
goto error;
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index aaee0a782747..797000842d40 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -17,6 +17,7 @@
#include <subcmd/parse-options.h>
#include "util/bpf-loader.h"
#include "util/debug.h"
+#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <pthread.h>
#include <stdlib.h>
@@ -308,9 +309,11 @@ static int handle_alias(int *argcp, const char ***argv)
if (*argcp > 1) {
struct strbuf buf;
- strbuf_init(&buf, PATH_MAX);
- strbuf_addstr(&buf, alias_string);
- sq_quote_argv(&buf, (*argv) + 1, PATH_MAX);
+ if (strbuf_init(&buf, PATH_MAX) < 0 ||
+ strbuf_addstr(&buf, alias_string) < 0 ||
+ sq_quote_argv(&buf, (*argv) + 1,
+ PATH_MAX) < 0)
+ die("Failed to allocate memory.");
free(alias_string);
alias_string = buf.buf;
}
@@ -533,6 +536,7 @@ int main(int argc, const char **argv)
{
const char *cmd;
char sbuf[STRERR_BUFSIZE];
+ int value;
/* libsubcmd init */
exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
@@ -542,6 +546,9 @@ int main(int argc, const char **argv)
page_size = sysconf(_SC_PAGE_SIZE);
cacheline_size = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
+ if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
+ sysctl_perf_event_max_stack = value;
+
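
sysctl__read_int() is one of the fs helpers in tools/lib/api (see the <api/fs/fs.h> include added above); for kernel.perf_event_max_stack it boils down to reading a file under /proc/sys. A rough standalone equivalent, assuming procfs is mounted at /proc (the real helper detects the mount point; sketch only):

#include <stdio.h>

/* "kernel/perf_event_max_stack" -> /proc/sys/kernel/perf_event_max_stack */
static int read_sysctl_int(const char *name, int *value)
{
	char path[256];
	FILE *fp;
	int ret = -1;

	snprintf(path, sizeof(path), "/proc/sys/%s", name);
	fp = fopen(path, "r");
	if (fp) {
		if (fscanf(fp, "%d", value) == 1)
			ret = 0;
		fclose(fp);
	}
	return ret;
}
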
cmd = extract_argv0_path(argv[0]);
if (!cmd)
cmd = "perf-help";
@@ -549,6 +556,7 @@ int main(int argc, const char **argv)
srandom(time(NULL));
perf_config(perf_default_config, NULL);
+ set_buildid_dir(NULL);
/* get debugfs/tracefs mount point from /proc/mounts */
tracing_path_mount();
@@ -572,7 +580,6 @@ int main(int argc, const char **argv)
}
if (!prefixcmp(cmd, "trace")) {
#ifdef HAVE_LIBAUDIT_SUPPORT
- set_buildid_dir(NULL);
setup_path();
argv[0] = "trace";
return cmd_trace(argc, argv, NULL);
@@ -587,7 +594,6 @@ int main(int argc, const char **argv)
argc--;
handle_options(&argv, &argc, NULL);
commit_pager_choice();
- set_buildid_dir(NULL);
if (argc > 0) {
if (!prefixcmp(argv[0], "--"))
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 5381a01c0610..cd8f1b150f9e 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -52,7 +52,6 @@ struct record_opts {
bool sample_weight;
bool sample_time;
bool sample_time_set;
- bool callgraph_set;
bool period;
bool running_time;
bool full_auxtrace;
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 1b02cdc0cab6..7656ff8aa066 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -34,10 +34,9 @@ import datetime
#
# ubuntu:
#
-# $ sudo apt-get install postgresql
+# $ sudo apt-get install postgresql python-pyside.qtsql libqt4-sql-psql
# $ sudo su - postgres
-# $ createuser <your user id here>
-# Shall the new role be a superuser? (y/n) y
+# $ createuser -s <your user id here>
#
# An example of using this script with Intel PT:
#
@@ -224,11 +223,14 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
perf_db_export_mode = True
perf_db_export_calls = False
+perf_db_export_callchains = False
+
def usage():
- print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>]"
+ print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
print >> sys.stderr, "where: columns 'all' or 'branches'"
- print >> sys.stderr, " calls 'calls' => create calls table"
+ print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
+ print >> sys.stderr, " callchains 'callchains' => create call_paths table"
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
@@ -246,9 +248,11 @@ if columns not in ("all", "branches"):
branches = (columns == "branches")
-if (len(sys.argv) >= 4):
- if (sys.argv[3] == "calls"):
+for i in range(3,len(sys.argv)):
+ if (sys.argv[i] == "calls"):
perf_db_export_calls = True
+ elif (sys.argv[i] == "callchains"):
+ perf_db_export_callchains = True
else:
usage()
@@ -359,14 +363,16 @@ else:
'transaction bigint,'
'data_src bigint,'
'branch_type integer,'
- 'in_tx boolean)')
+ 'in_tx boolean,'
+ 'call_path_id bigint)')
-if perf_db_export_calls:
+if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE TABLE call_paths ('
'id bigint NOT NULL,'
'parent_id bigint,'
'symbol_id bigint,'
'ip bigint)')
+if perf_db_export_calls:
do_query(query, 'CREATE TABLE calls ('
'id bigint NOT NULL,'
'thread_id bigint,'
@@ -428,7 +434,7 @@ do_query(query, 'CREATE VIEW comm_threads_view AS '
'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
' FROM comm_threads')
-if perf_db_export_calls:
+if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE VIEW call_paths_view AS '
'SELECT '
'c.id,'
@@ -444,6 +450,7 @@ if perf_db_export_calls:
'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
'(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name'
' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
+if perf_db_export_calls:
do_query(query, 'CREATE VIEW calls_view AS '
'SELECT '
'calls.id,'
@@ -541,8 +548,9 @@ dso_file = open_output_file("dso_table.bin")
symbol_file = open_output_file("symbol_table.bin")
branch_type_file = open_output_file("branch_type_table.bin")
sample_file = open_output_file("sample_table.bin")
-if perf_db_export_calls:
+if perf_db_export_calls or perf_db_export_callchains:
call_path_file = open_output_file("call_path_table.bin")
+if perf_db_export_calls:
call_file = open_output_file("call_table.bin")
def trace_begin():
@@ -554,8 +562,8 @@ def trace_begin():
comm_table(0, "unknown")
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
- sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
- if perf_db_export_calls:
+ sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
unhandled_count = 0
@@ -571,8 +579,9 @@ def trace_end():
copy_output_file(symbol_file, "symbols")
copy_output_file(branch_type_file, "branch_types")
copy_output_file(sample_file, "samples")
- if perf_db_export_calls:
+ if perf_db_export_calls or perf_db_export_callchains:
copy_output_file(call_path_file, "call_paths")
+ if perf_db_export_calls:
copy_output_file(call_file, "calls")
print datetime.datetime.today(), "Removing intermediate files..."
@@ -585,8 +594,9 @@ def trace_end():
remove_output_file(symbol_file)
remove_output_file(branch_type_file)
remove_output_file(sample_file)
- if perf_db_export_calls:
+ if perf_db_export_calls or perf_db_export_callchains:
remove_output_file(call_path_file)
+ if perf_db_export_calls:
remove_output_file(call_file)
os.rmdir(output_dir_name)
print datetime.datetime.today(), "Adding primary keys"
@@ -599,8 +609,9 @@ def trace_end():
do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
- if perf_db_export_calls:
+ if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
+ if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
print datetime.datetime.today(), "Adding foreign keys"
@@ -623,10 +634,11 @@ def trace_end():
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
- if perf_db_export_calls:
+ if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'ALTER TABLE call_paths '
'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
+ if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls '
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
@@ -694,11 +706,11 @@ def branch_type_table(branch_type, name, *x):
value = struct.pack(fmt, 2, 4, branch_type, n, name)
branch_type_file.write(value)
-def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, *x):
+def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x):
if branches:
- value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiB", 17, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx)
+ value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiq", 18, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id)
else:
- value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiB", 21, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx)
+ value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiq", 22, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id)
sample_file.write(value)
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index 1ba628ed049a..66a28982547b 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -37,6 +37,8 @@ perf-y += topology.o
perf-y += cpumap.o
perf-y += stat.o
perf-y += event_update.o
+perf-y += event-times.o
+perf-y += backward-ring-buffer.o
$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
$(call rule_mkdir)
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
new file mode 100644
index 000000000000..d9ba991a9a30
--- /dev/null
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -0,0 +1,151 @@
+/*
+ * Test backward bit in event attribute, read ring buffer from end to
+ * beginning
+ */
+
+#include <perf.h>
+#include <evlist.h>
+#include <sys/prctl.h>
+#include "tests.h"
+#include "debug.h"
+
+#define NR_ITERS 111
+
+static void testcase(void)
+{
+ int i;
+
+ for (i = 0; i < NR_ITERS; i++) {
+ char proc_name[10];
+
+ snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
+ prctl(PR_SET_NAME, proc_name);
+ }
+}
+
+static int count_samples(struct perf_evlist *evlist, int *sample_count,
+ int *comm_count)
+{
+ int i;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ union perf_event *event;
+
+ perf_evlist__mmap_read_catchup(evlist, i);
+ while ((event = perf_evlist__mmap_read_backward(evlist, i)) != NULL) {
+ const u32 type = event->header.type;
+
+ switch (type) {
+ case PERF_RECORD_SAMPLE:
+ (*sample_count)++;
+ break;
+ case PERF_RECORD_COMM:
+ (*comm_count)++;
+ break;
+ default:
+ pr_err("Unexpected record of type %d\n", type);
+ return TEST_FAIL;
+ }
+ }
+ }
+ return TEST_OK;
+}
+
+static int do_test(struct perf_evlist *evlist, int mmap_pages,
+ int *sample_count, int *comm_count)
+{
+ int err;
+ char sbuf[STRERR_BUFSIZE];
+
+ err = perf_evlist__mmap(evlist, mmap_pages, true);
+ if (err < 0) {
+ pr_debug("perf_evlist__mmap: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
+ return TEST_FAIL;
+ }
+
+ perf_evlist__enable(evlist);
+ testcase();
+ perf_evlist__disable(evlist);
+
+ err = count_samples(evlist, sample_count, comm_count);
+ perf_evlist__munmap(evlist);
+ return err;
+}
+
+
+int test__backward_ring_buffer(int subtest __maybe_unused)
+{
+ int ret = TEST_SKIP, err, sample_count = 0, comm_count = 0;
+ char pid[16], sbuf[STRERR_BUFSIZE];
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel __maybe_unused;
+ struct parse_events_error parse_error;
+ struct record_opts opts = {
+ .target = {
+ .uid = UINT_MAX,
+ .uses_mmap = true,
+ },
+ .freq = 0,
+ .mmap_pages = 256,
+ .default_interval = 1,
+ };
+
+ snprintf(pid, sizeof(pid), "%d", getpid());
+ pid[sizeof(pid) - 1] = '\0';
+ opts.target.tid = opts.target.pid = pid;
+
+ evlist = perf_evlist__new();
+ if (!evlist) {
+ pr_debug("Not enough memory to create evlist\n");
+ return TEST_FAIL;
+ }
+
+ err = perf_evlist__create_maps(evlist, &opts.target);
+ if (err < 0) {
+ pr_debug("Not enough memory to create thread/cpu maps\n");
+ goto out_delete_evlist;
+ }
+
+ bzero(&parse_error, sizeof(parse_error));
+ err = parse_events(evlist, "syscalls:sys_enter_prctl", &parse_error);
+ if (err) {
+ pr_debug("Failed to parse tracepoint event, try using root\n");
+ ret = TEST_SKIP;
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__config(evlist, &opts, NULL);
+
+ /* Set backward bit, ring buffer should be writing from end */
+ evlist__for_each(evlist, evsel)
+ evsel->attr.write_backward = 1;
+
+ err = perf_evlist__open(evlist);
+ if (err < 0) {
+ pr_debug("perf_evlist__open: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ ret = TEST_FAIL;
+ err = do_test(evlist, opts.mmap_pages, &sample_count,
+ &comm_count);
+ if (err != TEST_OK)
+ goto out_delete_evlist;
+
+ if ((sample_count != NR_ITERS) || (comm_count != NR_ITERS)) {
+ pr_err("Unexpected counter: sample_count=%d, comm_count=%d\n",
+ sample_count, comm_count);
+ goto out_delete_evlist;
+ }
+
+ err = do_test(evlist, 1, &sample_count, &comm_count);
+ if (err != TEST_OK)
+ goto out_delete_evlist;
+
+ ret = TEST_OK;
+out_delete_evlist:
+ perf_evlist__delete(evlist);
+ return ret;
+}
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index 199501c71e27..f31eed31c1a9 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -138,7 +138,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
perf_evlist__splice_list_tail(evlist, &parse_evlist.list);
evlist->nr_groups = parse_evlist.nr_groups;
- perf_evlist__config(evlist, &opts);
+ perf_evlist__config(evlist, &opts, NULL);
err = perf_evlist__open(evlist);
if (err < 0) {
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index f2b1dcac45d3..0e95c20ecf6e 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -204,6 +204,14 @@ static struct test generic_tests[] = {
.func = test__event_update,
},
{
+ .desc = "Test events times",
+ .func = test__event_times,
+ },
+ {
+ .desc = "Test backward reading from ring buffer",
+ .func = test__backward_ring_buffer,
+ },
+ {
.func = NULL,
},
};
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index abd3f0ec0c0b..68a69a195545 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -532,7 +532,7 @@ static int do_test_code_reading(bool try_kcore)
goto out_put;
}
- perf_evlist__config(evlist, &opts);
+ perf_evlist__config(evlist, &opts, NULL);
evsel = perf_evlist__first(evlist);
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
index dc673ff7c437..8cf0d9e189a8 100644
--- a/tools/perf/tests/dso-data.c
+++ b/tools/perf/tests/dso-data.c
@@ -202,7 +202,7 @@ static int dsos__create(int cnt, int size)
{
int i;
- dsos = malloc(sizeof(dsos) * cnt);
+ dsos = malloc(sizeof(*dsos) * cnt);
TEST_ASSERT_VAL("failed to alloc dsos array", dsos);
for (i = 0; i < cnt; i++) {
diff --git a/tools/perf/tests/event-times.c b/tools/perf/tests/event-times.c
new file mode 100644
index 000000000000..95fb744f6628
--- /dev/null
+++ b/tools/perf/tests/event-times.c
@@ -0,0 +1,236 @@
+#include <linux/compiler.h>
+#include <string.h>
+#include "tests.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "util.h"
+#include "debug.h"
+#include "thread_map.h"
+#include "target.h"
+
+static int attach__enable_on_exec(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__last(evlist);
+ struct target target = {
+ .uid = UINT_MAX,
+ };
+ const char *argv[] = { "true", NULL, };
+ char sbuf[STRERR_BUFSIZE];
+ int err;
+
+ pr_debug("attaching to spawned child, enable on exec\n");
+
+ err = perf_evlist__create_maps(evlist, &target);
+ if (err < 0) {
+ pr_debug("Not enough memory to create thread/cpu maps\n");
+ return err;
+ }
+
+ err = perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
+ if (err < 0) {
+ pr_debug("Couldn't run the workload!\n");
+ return err;
+ }
+
+ evsel->attr.enable_on_exec = 1;
+
+ err = perf_evlist__open(evlist);
+ if (err < 0) {
+ pr_debug("perf_evlist__open: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
+ return err;
+ }
+
+ return perf_evlist__start_workload(evlist) == 1 ? TEST_OK : TEST_FAIL;
+}
+
+static int detach__enable_on_exec(struct perf_evlist *evlist)
+{
+ waitpid(evlist->workload.pid, NULL, 0);
+ return 0;
+}
+
+static int attach__current_disabled(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__last(evlist);
+ struct thread_map *threads;
+ int err;
+
+ pr_debug("attaching to current thread as disabled\n");
+
+ threads = thread_map__new(-1, getpid(), UINT_MAX);
+ if (threads == NULL) {
+ pr_debug("thread_map__new\n");
+ return -1;
+ }
+
+ evsel->attr.disabled = 1;
+
+ err = perf_evsel__open_per_thread(evsel, threads);
+ if (err) {
+ pr_debug("Failed to open event cpu-clock:u\n");
+ return err;
+ }
+
+ thread_map__put(threads);
+ return perf_evsel__enable(evsel) == 0 ? TEST_OK : TEST_FAIL;
+}
+
+static int attach__current_enabled(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__last(evlist);
+ struct thread_map *threads;
+ int err;
+
+ pr_debug("attaching to current thread as enabled\n");
+
+ threads = thread_map__new(-1, getpid(), UINT_MAX);
+ if (threads == NULL) {
+ pr_debug("failed to call thread_map__new\n");
+ return -1;
+ }
+
+ err = perf_evsel__open_per_thread(evsel, threads);
+
+ thread_map__put(threads);
+ return err == 0 ? TEST_OK : TEST_FAIL;
+}
+
+static int detach__disable(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__last(evlist);
+
+ return perf_evsel__enable(evsel);
+}
+
+static int attach__cpu_disabled(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__last(evlist);
+ struct cpu_map *cpus;
+ int err;
+
+ pr_debug("attaching to CPU 0 as disabled\n");
+
+ cpus = cpu_map__new("0");
+ if (cpus == NULL) {
+ pr_debug("failed to call cpu_map__new\n");
+ return -1;
+ }
+
+ evsel->attr.disabled = 1;
+
+ err = perf_evsel__open_per_cpu(evsel, cpus);
+ if (err) {
+ if (err == -EACCES)
+ return TEST_SKIP;
+
+ pr_debug("Failed to open event cpu-clock:u\n");
+ return err;
+ }
+
+ cpu_map__put(cpus);
+ return perf_evsel__enable(evsel);
+}
+
+static int attach__cpu_enabled(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__last(evlist);
+ struct cpu_map *cpus;
+ int err;
+
+ pr_debug("attaching to CPU 0 as enabled\n");
+
+ cpus = cpu_map__new("0");
+ if (cpus == NULL) {
+ pr_debug("failed to call cpu_map__new\n");
+ return -1;
+ }
+
+ err = perf_evsel__open_per_cpu(evsel, cpus);
+ if (err == -EACCES)
+ return TEST_SKIP;
+
+ cpu_map__put(cpus);
+ return err ? TEST_FAIL : TEST_OK;
+}
+
+static int test_times(int (attach)(struct perf_evlist *),
+ int (detach)(struct perf_evlist *))
+{
+ struct perf_counts_values count;
+ struct perf_evlist *evlist = NULL;
+ struct perf_evsel *evsel;
+ int err = -1, i;
+
+ evlist = perf_evlist__new();
+ if (!evlist) {
+ pr_debug("failed to create event list\n");
+ goto out_err;
+ }
+
+ err = parse_events(evlist, "cpu-clock:u", NULL);
+ if (err) {
+ pr_debug("failed to parse event cpu-clock:u\n");
+ goto out_err;
+ }
+
+ evsel = perf_evlist__last(evlist);
+ evsel->attr.read_format |=
+ PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING;
+
+ err = attach(evlist);
+ if (err == TEST_SKIP) {
+ pr_debug(" SKIP : not enough rights\n");
+ return err;
+ }
+
+ TEST_ASSERT_VAL("failed to attach", !err);
+
+ for (i = 0; i < 100000000; i++) { }
+
+ TEST_ASSERT_VAL("failed to detach", !detach(evlist));
+
+ perf_evsel__read(evsel, 0, 0, &count);
+
+ err = !(count.ena == count.run);
+
+ pr_debug(" %s: ena %" PRIu64", run %" PRIu64"\n",
+ !err ? "OK " : "FAILED",
+ count.ena, count.run);
+
+out_err:
+ if (evlist)
+ perf_evlist__delete(evlist);
+ return !err ? TEST_OK : TEST_FAIL;
+}
+
+/*
+ * This test creates software event 'cpu-clock'
+ * attaches it in several ways (explained below)
+ * and checks that enabled and running times
+ * match.
+ */
+int test__event_times(int subtest __maybe_unused)
+{
+ int err, ret = 0;
+
+#define _T(attach, detach) \
+ err = test_times(attach, detach); \
+ if (err && (ret == TEST_OK || ret == TEST_SKIP)) \
+ ret = err;
+
+ /* attach on newly spawned process after exec */
+ _T(attach__enable_on_exec, detach__enable_on_exec)
+ /* attach on current process as enabled */
+ _T(attach__current_enabled, detach__disable)
+ /* attach on current process as disabled */
+ _T(attach__current_disabled, detach__disable)
+ /* attach on cpu as disabled */
+ _T(attach__cpu_disabled, detach__disable)
+ /* attach on cpu as enabled */
+ _T(attach__cpu_enabled, detach__disable)
+
+#undef _T
+ return ret;
+}
diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c
index 012eab5d1df1..63ecf21750eb 100644
--- a/tools/perf/tests/event_update.c
+++ b/tools/perf/tests/event_update.c
@@ -30,7 +30,7 @@ static int process_event_scale(struct perf_tool *tool __maybe_unused,
TEST_ASSERT_VAL("wrong id", ev->id == 123);
TEST_ASSERT_VAL("wrong id", ev->type == PERF_EVENT_UPDATE__SCALE);
- TEST_ASSERT_VAL("wrong scale", ev_data->scale = 0.123);
+ TEST_ASSERT_VAL("wrong scale", ev_data->scale == 0.123);
return 0;
}
diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c
index f55f4bd47932..6b21746d6eec 100644
--- a/tools/perf/tests/hists_common.c
+++ b/tools/perf/tests/hists_common.c
@@ -161,7 +161,7 @@ void print_hists_in(struct hists *hists)
struct rb_root *root;
struct rb_node *node;
- if (sort__need_collapse)
+ if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index ed5aa9eaeb6c..a9e3db3afac4 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -101,7 +101,7 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
if (machine__resolve(machine, &al, &sample) < 0)
goto out;
- if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH,
+ if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
NULL) < 0) {
addr_location__put(&al);
goto out;
@@ -126,7 +126,7 @@ static void del_hist_entries(struct hists *hists)
struct rb_root *root_out;
struct rb_node *node;
- if (sort__need_collapse)
+ if (hists__has(hists, need_collapse))
root_in = &hists->entries_collapsed;
else
root_in = hists->entries_in;
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index b825d24f8186..e846f8c42013 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -81,7 +81,7 @@ static int add_hist_entries(struct perf_evlist *evlist,
al.socket = fake_samples[i].socket;
if (hist_entry_iter__add(&iter, &al,
- PERF_MAX_STACK_DEPTH, NULL) < 0) {
+ sysctl_perf_event_max_stack, NULL) < 0) {
addr_location__put(&al);
goto out;
}
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 358324e47805..acf5a1301c07 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -145,7 +145,7 @@ static int __validate_match(struct hists *hists)
/*
* Only entries from fake_common_samples should have a pair.
*/
- if (sort__need_collapse)
+ if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
@@ -197,7 +197,7 @@ static int __validate_link(struct hists *hists, int idx)
* and some entries will have no pair. However every entry
* in other hists should have (dummy) pair.
*/
- if (sort__need_collapse)
+ if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index d3556fbe8c5c..63c5efaba1b5 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -67,7 +67,7 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
if (machine__resolve(machine, &al, &sample) < 0)
goto out;
- if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH,
+ if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
NULL) < 0) {
addr_location__put(&al);
goto out;
@@ -92,7 +92,7 @@ static void del_hist_entries(struct hists *hists)
struct rb_root *root_out;
struct rb_node *node;
- if (sort__need_collapse)
+ if (hists__has(hists, need_collapse))
root_in = &hists->entries_collapsed;
else
root_in = hists->entries_in;
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index ddb78fae064a..614e45a3c603 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -80,7 +80,7 @@ int test__keep_tracking(int subtest __maybe_unused)
CHECK__(parse_events(evlist, "dummy:u", NULL));
CHECK__(parse_events(evlist, "cycles:u", NULL));
- perf_evlist__config(evlist, &opts);
+ perf_evlist__config(evlist, &opts, NULL);
evsel = perf_evlist__first(evlist);
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
index eb99a105f31c..4344fe482c1d 100644
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -44,7 +44,7 @@ int test__syscall_openat_tp_fields(int subtest __maybe_unused)
goto out_delete_evlist;
}
- perf_evsel__config(evsel, &opts);
+ perf_evsel__config(evsel, &opts, NULL);
thread_map__set_pid(evlist->threads, 0, getpid());
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 1cc78cefe399..b836ee6a8d9b 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -99,7 +99,7 @@ int test__PERF_RECORD(int subtest __maybe_unused)
perf_evsel__set_sample_bit(evsel, CPU);
perf_evsel__set_sample_bit(evsel, TID);
perf_evsel__set_sample_bit(evsel, TIME);
- perf_evlist__config(evlist, &opts);
+ perf_evlist__config(evlist, &opts, NULL);
err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
if (err < 0) {
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index ebd80168d51e..39a689bf7574 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -417,7 +417,7 @@ int test__switch_tracking(int subtest __maybe_unused)
perf_evsel__set_sample_bit(tracking_evsel, TIME);
/* Config events */
- perf_evlist__config(evlist, &opts);
+ perf_evlist__config(evlist, &opts, NULL);
/* Check moved event is still at the front */
if (cycles_evsel != perf_evlist__first(evlist)) {
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 82b2b5e6ba7c..c57e72c826d2 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -85,6 +85,8 @@ int test__synthesize_stat_config(int subtest);
int test__synthesize_stat(int subtest);
int test__synthesize_stat_round(int subtest);
int test__event_update(int subtest);
+int test__event_times(int subtest);
+int test__backward_ring_buffer(int subtest);
#if defined(__arm__) || defined(__aarch64__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 630b0b409b97..e63abab7d5a1 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -54,8 +54,14 @@ int test__vmlinux_matches_kallsyms(int subtest __maybe_unused)
* Step 3:
*
* Load and split /proc/kallsyms into multiple maps, one per module.
+ * Do not use kcore, as this test was designed before kcore support
+ * and has parts that only make sense if using the non-kcore code.
+ * XXX: extend it to stress the kcore code as well, hint: the list
+ * of modules extracted from /proc/kcore, in its current form, can't
+ * be compared against the list of modules found in the "vmlinux"
+ * code and with the one obtained from /proc/modules by the "kallsyms" code.
*/
- if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
+ if (__machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, true, NULL) <= 0) {
pr_debug("dso__load_kallsyms ");
goto out;
}
@@ -157,6 +163,9 @@ next_pair:
pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
mem_start, sym->name, pair->name);
+ } else {
+ pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
+ mem_start, sym->name, first_pair->name);
}
}
} else
diff --git a/tools/perf/trace/beauty/eventfd.c b/tools/perf/trace/beauty/eventfd.c
new file mode 100644
index 000000000000..d64f4a9128a1
--- /dev/null
+++ b/tools/perf/trace/beauty/eventfd.c
@@ -0,0 +1,38 @@
+#include <sys/eventfd.h>
+
+#ifndef EFD_SEMAPHORE
+#define EFD_SEMAPHORE 1
+#endif
+
+#ifndef EFD_NONBLOCK
+#define EFD_NONBLOCK 00004000
+#endif
+
+#ifndef EFD_CLOEXEC
+#define EFD_CLOEXEC 02000000
+#endif
+
+static size_t syscall_arg__scnprintf_eventfd_flags(char *bf, size_t size, struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+ if (flags == 0)
+ return scnprintf(bf, size, "NONE");
+#define P_FLAG(n) \
+ if (flags & EFD_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~EFD_##n; \
+ }
+
+ P_FLAG(SEMAPHORE);
+ P_FLAG(CLOEXEC);
+ P_FLAG(NONBLOCK);
+#undef P_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_EFD_FLAGS syscall_arg__scnprintf_eventfd_flags
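Editorial aside, not part of the patch: every new trace/beauty/ helper follows the flag-decomposition pattern introduced in this file — peel known bits off, print their names joined by '|', then dump any leftover bits in hex. A hedged, standalone illustration of the same idea outside the struct syscall_arg plumbing, using plain snprintf() in place of perf's scnprintf():

/* Standalone sketch of the P_FLAG pattern; buffer assumed large enough. */
#include <stdio.h>
#include <sys/eventfd.h>

static size_t fmt_eventfd_flags(char *bf, size_t size, int flags)
{
	size_t printed = 0;

	if (flags == 0)
		return snprintf(bf, size, "NONE");
#define P_FLAG(n)							\
	if (flags & EFD_##n) {						\
		printed += snprintf(bf + printed, size - printed,	\
				    "%s%s", printed ? "|" : "", #n);	\
		flags &= ~EFD_##n;					\
	}
	P_FLAG(SEMAPHORE);
	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG
	if (flags) /* unknown leftover bits */
		printed += snprintf(bf + printed, size - printed,
				    "%s%#x", printed ? "|" : "", flags);
	return printed;
}

int main(void)
{
	char buf[64];

	fmt_eventfd_flags(buf, sizeof(buf), EFD_CLOEXEC | EFD_NONBLOCK);
	puts(buf); /* prints: CLOEXEC|NONBLOCK */
	return 0;
}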
diff --git a/tools/perf/trace/beauty/flock.c b/tools/perf/trace/beauty/flock.c
new file mode 100644
index 000000000000..021bb48c6336
--- /dev/null
+++ b/tools/perf/trace/beauty/flock.c
@@ -0,0 +1,31 @@
+
+static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, op = arg->val;
+
+ if (op == 0)
+ return scnprintf(bf, size, "NONE");
+#define P_CMD(cmd) \
+ if ((op & LOCK_##cmd) == LOCK_##cmd) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #cmd); \
+ op &= ~LOCK_##cmd; \
+ }
+
+ P_CMD(SH);
+ P_CMD(EX);
+ P_CMD(NB);
+ P_CMD(UN);
+ P_CMD(MAND);
+ P_CMD(RW);
+ P_CMD(READ);
+ P_CMD(WRITE);
+#undef P_CMD
+
+ if (op)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", op);
+
+ return printed;
+}
+
+#define SCA_FLOCK syscall_arg__scnprintf_flock
diff --git a/tools/perf/trace/beauty/futex_op.c b/tools/perf/trace/beauty/futex_op.c
new file mode 100644
index 000000000000..e2476211f22d
--- /dev/null
+++ b/tools/perf/trace/beauty/futex_op.c
@@ -0,0 +1,44 @@
+#include <linux/futex.h>
+
+static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg)
+{
+ enum syscall_futex_args {
+ SCF_UADDR = (1 << 0),
+ SCF_OP = (1 << 1),
+ SCF_VAL = (1 << 2),
+ SCF_TIMEOUT = (1 << 3),
+ SCF_UADDR2 = (1 << 4),
+ SCF_VAL3 = (1 << 5),
+ };
+ int op = arg->val;
+ int cmd = op & FUTEX_CMD_MASK;
+ size_t printed = 0;
+
+ switch (cmd) {
+#define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
+ P_FUTEX_OP(WAIT); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
+ P_FUTEX_OP(WAKE); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+ P_FUTEX_OP(FD); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+ P_FUTEX_OP(REQUEUE); arg->mask |= SCF_VAL3|SCF_TIMEOUT; break;
+ P_FUTEX_OP(CMP_REQUEUE); arg->mask |= SCF_TIMEOUT; break;
+ P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT; break;
+ P_FUTEX_OP(WAKE_OP); break;
+ P_FUTEX_OP(LOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+ P_FUTEX_OP(UNLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+ P_FUTEX_OP(TRYLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
+ P_FUTEX_OP(WAIT_BITSET); arg->mask |= SCF_UADDR2; break;
+ P_FUTEX_OP(WAKE_BITSET); arg->mask |= SCF_UADDR2; break;
+ P_FUTEX_OP(WAIT_REQUEUE_PI); break;
+ default: printed = scnprintf(bf, size, "%#x", cmd); break;
+ }
+
+ if (op & FUTEX_PRIVATE_FLAG)
+ printed += scnprintf(bf + printed, size - printed, "|PRIV");
+
+ if (op & FUTEX_CLOCK_REALTIME)
+ printed += scnprintf(bf + printed, size - printed, "|CLKRT");
+
+ return printed;
+}
+
+#define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op
diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c
new file mode 100644
index 000000000000..3444a4d5382d
--- /dev/null
+++ b/tools/perf/trace/beauty/mmap.c
@@ -0,0 +1,158 @@
+#include <sys/mman.h>
+
+static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, prot = arg->val;
+
+ if (prot == PROT_NONE)
+ return scnprintf(bf, size, "NONE");
+#define P_MMAP_PROT(n) \
+ if (prot & PROT_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ prot &= ~PROT_##n; \
+ }
+
+ P_MMAP_PROT(EXEC);
+ P_MMAP_PROT(READ);
+ P_MMAP_PROT(WRITE);
+#ifdef PROT_SEM
+ P_MMAP_PROT(SEM);
+#endif
+ P_MMAP_PROT(GROWSDOWN);
+ P_MMAP_PROT(GROWSUP);
+#undef P_MMAP_PROT
+
+ if (prot)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot);
+
+ return printed;
+}
+
+#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
+
+#ifndef MAP_STACK
+# define MAP_STACK 0x20000
+#endif
+
+static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+#define P_MMAP_FLAG(n) \
+ if (flags & MAP_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~MAP_##n; \
+ }
+
+ P_MMAP_FLAG(SHARED);
+ P_MMAP_FLAG(PRIVATE);
+#ifdef MAP_32BIT
+ P_MMAP_FLAG(32BIT);
+#endif
+ P_MMAP_FLAG(ANONYMOUS);
+ P_MMAP_FLAG(DENYWRITE);
+ P_MMAP_FLAG(EXECUTABLE);
+ P_MMAP_FLAG(FILE);
+ P_MMAP_FLAG(FIXED);
+ P_MMAP_FLAG(GROWSDOWN);
+#ifdef MAP_HUGETLB
+ P_MMAP_FLAG(HUGETLB);
+#endif
+ P_MMAP_FLAG(LOCKED);
+ P_MMAP_FLAG(NONBLOCK);
+ P_MMAP_FLAG(NORESERVE);
+ P_MMAP_FLAG(POPULATE);
+ P_MMAP_FLAG(STACK);
+#ifdef MAP_UNINITIALIZED
+ P_MMAP_FLAG(UNINITIALIZED);
+#endif
+#undef P_MMAP_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
+
+static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+#define P_MREMAP_FLAG(n) \
+ if (flags & MREMAP_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~MREMAP_##n; \
+ }
+
+ P_MREMAP_FLAG(MAYMOVE);
+#ifdef MREMAP_FIXED
+ P_MREMAP_FLAG(FIXED);
+#endif
+#undef P_MREMAP_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_MREMAP_FLAGS syscall_arg__scnprintf_mremap_flags
+
+#ifndef MADV_HWPOISON
+#define MADV_HWPOISON 100
+#endif
+
+#ifndef MADV_MERGEABLE
+#define MADV_MERGEABLE 12
+#endif
+
+#ifndef MADV_UNMERGEABLE
+#define MADV_UNMERGEABLE 13
+#endif
+
+static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int behavior = arg->val;
+
+ switch (behavior) {
+#define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
+ P_MADV_BHV(NORMAL);
+ P_MADV_BHV(RANDOM);
+ P_MADV_BHV(SEQUENTIAL);
+ P_MADV_BHV(WILLNEED);
+ P_MADV_BHV(DONTNEED);
+ P_MADV_BHV(REMOVE);
+ P_MADV_BHV(DONTFORK);
+ P_MADV_BHV(DOFORK);
+ P_MADV_BHV(HWPOISON);
+#ifdef MADV_SOFT_OFFLINE
+ P_MADV_BHV(SOFT_OFFLINE);
+#endif
+ P_MADV_BHV(MERGEABLE);
+ P_MADV_BHV(UNMERGEABLE);
+#ifdef MADV_HUGEPAGE
+ P_MADV_BHV(HUGEPAGE);
+#endif
+#ifdef MADV_NOHUGEPAGE
+ P_MADV_BHV(NOHUGEPAGE);
+#endif
+#ifdef MADV_DONTDUMP
+ P_MADV_BHV(DONTDUMP);
+#endif
+#ifdef MADV_DODUMP
+ P_MADV_BHV(DODUMP);
+#endif
+#undef P_MADV_BHV
+ default: break;
+ }
+
+ return scnprintf(bf, size, "%#x", behavior);
+}
+
+#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
diff --git a/tools/perf/trace/beauty/mode_t.c b/tools/perf/trace/beauty/mode_t.c
new file mode 100644
index 000000000000..930d8fef2400
--- /dev/null
+++ b/tools/perf/trace/beauty/mode_t.c
@@ -0,0 +1,68 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+/* From include/linux/stat.h */
+#ifndef S_IRWXUGO
+#define S_IRWXUGO (S_IRWXU|S_IRWXG|S_IRWXO)
+#endif
+#ifndef S_IALLUGO
+#define S_IALLUGO (S_ISUID|S_ISGID|S_ISVTX|S_IRWXUGO)
+#endif
+#ifndef S_IRUGO
+#define S_IRUGO (S_IRUSR|S_IRGRP|S_IROTH)
+#endif
+#ifndef S_IWUGO
+#define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH)
+#endif
+#ifndef S_IXUGO
+#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH)
+#endif
+
+static size_t syscall_arg__scnprintf_mode_t(char *bf, size_t size, struct syscall_arg *arg)
+{
+ int printed = 0, mode = arg->val;
+
+#define P_MODE(n) \
+ if ((mode & S_##n) == S_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ mode &= ~S_##n; \
+ }
+
+ P_MODE(IALLUGO);
+ P_MODE(IRWXUGO);
+ P_MODE(IRUGO);
+ P_MODE(IWUGO);
+ P_MODE(IXUGO);
+ P_MODE(IFMT);
+ P_MODE(IFSOCK);
+ P_MODE(IFLNK);
+ P_MODE(IFREG);
+ P_MODE(IFBLK);
+ P_MODE(IFDIR);
+ P_MODE(IFCHR);
+ P_MODE(IFIFO);
+ P_MODE(ISUID);
+ P_MODE(ISGID);
+ P_MODE(ISVTX);
+ P_MODE(IRWXU);
+ P_MODE(IRUSR);
+ P_MODE(IWUSR);
+ P_MODE(IXUSR);
+ P_MODE(IRWXG);
+ P_MODE(IRGRP);
+ P_MODE(IWGRP);
+ P_MODE(IXGRP);
+ P_MODE(IRWXO);
+ P_MODE(IROTH);
+ P_MODE(IWOTH);
+ P_MODE(IXOTH);
+#undef P_MODE
+
+ if (mode)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", mode);
+
+ return printed;
+}
+
+#define SCA_MODE_T syscall_arg__scnprintf_mode_t
diff --git a/tools/perf/trace/beauty/msg_flags.c b/tools/perf/trace/beauty/msg_flags.c
new file mode 100644
index 000000000000..07fa8a0acad6
--- /dev/null
+++ b/tools/perf/trace/beauty/msg_flags.c
@@ -0,0 +1,62 @@
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#ifndef MSG_PROBE
+#define MSG_PROBE 0x10
+#endif
+#ifndef MSG_WAITFORONE
+#define MSG_WAITFORONE 0x10000
+#endif
+#ifndef MSG_SENDPAGE_NOTLAST
+#define MSG_SENDPAGE_NOTLAST 0x20000
+#endif
+#ifndef MSG_FASTOPEN
+#define MSG_FASTOPEN 0x20000000
+#endif
+#ifndef MSG_CMSG_CLOEXEC
+# define MSG_CMSG_CLOEXEC 0x40000000
+#endif
+
+static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+ if (flags == 0)
+ return scnprintf(bf, size, "NONE");
+#define P_MSG_FLAG(n) \
+ if (flags & MSG_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~MSG_##n; \
+ }
+
+ P_MSG_FLAG(OOB);
+ P_MSG_FLAG(PEEK);
+ P_MSG_FLAG(DONTROUTE);
+ P_MSG_FLAG(TRYHARD);
+ P_MSG_FLAG(CTRUNC);
+ P_MSG_FLAG(PROBE);
+ P_MSG_FLAG(TRUNC);
+ P_MSG_FLAG(DONTWAIT);
+ P_MSG_FLAG(EOR);
+ P_MSG_FLAG(WAITALL);
+ P_MSG_FLAG(FIN);
+ P_MSG_FLAG(SYN);
+ P_MSG_FLAG(CONFIRM);
+ P_MSG_FLAG(RST);
+ P_MSG_FLAG(ERRQUEUE);
+ P_MSG_FLAG(NOSIGNAL);
+ P_MSG_FLAG(MORE);
+ P_MSG_FLAG(WAITFORONE);
+ P_MSG_FLAG(SENDPAGE_NOTLAST);
+ P_MSG_FLAG(FASTOPEN);
+ P_MSG_FLAG(CMSG_CLOEXEC);
+#undef P_MSG_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_MSG_FLAGS syscall_arg__scnprintf_msg_flags
diff --git a/tools/perf/trace/beauty/open_flags.c b/tools/perf/trace/beauty/open_flags.c
new file mode 100644
index 000000000000..0f3679e0cdcf
--- /dev/null
+++ b/tools/perf/trace/beauty/open_flags.c
@@ -0,0 +1,56 @@
+
+static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+ if (!(flags & O_CREAT))
+ arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */
+
+ if (flags == 0)
+ return scnprintf(bf, size, "RDONLY");
+#define P_FLAG(n) \
+ if (flags & O_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~O_##n; \
+ }
+
+ P_FLAG(APPEND);
+ P_FLAG(ASYNC);
+ P_FLAG(CLOEXEC);
+ P_FLAG(CREAT);
+ P_FLAG(DIRECT);
+ P_FLAG(DIRECTORY);
+ P_FLAG(EXCL);
+ P_FLAG(LARGEFILE);
+ P_FLAG(NOATIME);
+ P_FLAG(NOCTTY);
+#ifdef O_NONBLOCK
+ P_FLAG(NONBLOCK);
+#elif O_NDELAY
+ P_FLAG(NDELAY);
+#endif
+#ifdef O_PATH
+ P_FLAG(PATH);
+#endif
+ P_FLAG(RDWR);
+#ifdef O_DSYNC
+ if ((flags & O_SYNC) == O_SYNC)
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC");
+ else {
+ P_FLAG(DSYNC);
+ }
+#else
+ P_FLAG(SYNC);
+#endif
+ P_FLAG(TRUNC);
+ P_FLAG(WRONLY);
+#undef P_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
diff --git a/tools/perf/trace/beauty/perf_event_open.c b/tools/perf/trace/beauty/perf_event_open.c
new file mode 100644
index 000000000000..311f09dd718d
--- /dev/null
+++ b/tools/perf/trace/beauty/perf_event_open.c
@@ -0,0 +1,43 @@
+#ifndef PERF_FLAG_FD_NO_GROUP
+# define PERF_FLAG_FD_NO_GROUP (1UL << 0)
+#endif
+
+#ifndef PERF_FLAG_FD_OUTPUT
+# define PERF_FLAG_FD_OUTPUT (1UL << 1)
+#endif
+
+#ifndef PERF_FLAG_PID_CGROUP
+# define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
+#endif
+
+#ifndef PERF_FLAG_FD_CLOEXEC
+# define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
+#endif
+
+static size_t syscall_arg__scnprintf_perf_flags(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+ if (flags == 0)
+ return 0;
+
+#define P_FLAG(n) \
+ if (flags & PERF_FLAG_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~PERF_FLAG_##n; \
+ }
+
+ P_FLAG(FD_NO_GROUP);
+ P_FLAG(FD_OUTPUT);
+ P_FLAG(PID_CGROUP);
+ P_FLAG(FD_CLOEXEC);
+#undef P_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_PERF_FLAGS syscall_arg__scnprintf_perf_flags
diff --git a/tools/perf/trace/beauty/pid.c b/tools/perf/trace/beauty/pid.c
new file mode 100644
index 000000000000..07486ea65ae3
--- /dev/null
+++ b/tools/perf/trace/beauty/pid.c
@@ -0,0 +1,21 @@
+static size_t syscall_arg__scnprintf_pid(char *bf, size_t size, struct syscall_arg *arg)
+{
+ int pid = arg->val;
+ struct trace *trace = arg->trace;
+ size_t printed = scnprintf(bf, size, "%d", pid);
+ struct thread *thread = machine__findnew_thread(trace->host, pid, pid);
+
+ if (thread != NULL) {
+ if (!thread->comm_set)
+ thread__set_comm_from_proc(thread);
+
+ if (thread->comm_set)
+ printed += scnprintf(bf + printed, size - printed,
+ " (%s)", thread__comm_str(thread));
+ thread__put(thread);
+ }
+
+ return printed;
+}
+
+#define SCA_PID syscall_arg__scnprintf_pid
diff --git a/tools/perf/trace/beauty/sched_policy.c b/tools/perf/trace/beauty/sched_policy.c
new file mode 100644
index 000000000000..c205bc608b3c
--- /dev/null
+++ b/tools/perf/trace/beauty/sched_policy.c
@@ -0,0 +1,44 @@
+#include <sched.h>
+
+/*
+ * Not defined anywhere else, probably, just to make sure we
+ * catch future flags
+ */
+#define SCHED_POLICY_MASK 0xff
+
+#ifndef SCHED_DEADLINE
+#define SCHED_DEADLINE 6
+#endif
+
+static size_t syscall_arg__scnprintf_sched_policy(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ const char *policies[] = {
+ "NORMAL", "FIFO", "RR", "BATCH", "ISO", "IDLE", "DEADLINE",
+ };
+ size_t printed;
+ int policy = arg->val,
+ flags = policy & ~SCHED_POLICY_MASK;
+
+ policy &= SCHED_POLICY_MASK;
+ if (policy <= SCHED_DEADLINE)
+ printed = scnprintf(bf, size, "%s", policies[policy]);
+ else
+ printed = scnprintf(bf, size, "%#x", policy);
+
+#define P_POLICY_FLAG(n) \
+ if (flags & SCHED_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "|%s", #n); \
+ flags &= ~SCHED_##n; \
+ }
+
+ P_POLICY_FLAG(RESET_ON_FORK);
+#undef P_POLICY_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "|%#x", flags);
+
+ return printed;
+}
+
+#define SCA_SCHED_POLICY syscall_arg__scnprintf_sched_policy
diff --git a/tools/perf/trace/beauty/seccomp.c b/tools/perf/trace/beauty/seccomp.c
new file mode 100644
index 000000000000..213c5a7e3e92
--- /dev/null
+++ b/tools/perf/trace/beauty/seccomp.c
@@ -0,0 +1,52 @@
+#include <linux/seccomp.h>
+
+#ifndef SECCOMP_SET_MODE_STRICT
+#define SECCOMP_SET_MODE_STRICT 0
+#endif
+#ifndef SECCOMP_SET_MODE_FILTER
+#define SECCOMP_SET_MODE_FILTER 1
+#endif
+
+static size_t syscall_arg__scnprintf_seccomp_op(char *bf, size_t size, struct syscall_arg *arg)
+{
+ int op = arg->val;
+ size_t printed = 0;
+
+ switch (op) {
+#define P_SECCOMP_SET_MODE_OP(n) case SECCOMP_SET_MODE_##n: printed = scnprintf(bf, size, #n); break
+ P_SECCOMP_SET_MODE_OP(STRICT);
+ P_SECCOMP_SET_MODE_OP(FILTER);
+#undef P_SECCOMP_SET_MODE_OP
+ default: printed = scnprintf(bf, size, "%#x", op); break;
+ }
+
+ return printed;
+}
+
+#define SCA_SECCOMP_OP syscall_arg__scnprintf_seccomp_op
+
+#ifndef SECCOMP_FILTER_FLAG_TSYNC
+#define SECCOMP_FILTER_FLAG_TSYNC 1
+#endif
+
+static size_t syscall_arg__scnprintf_seccomp_flags(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+#define P_FLAG(n) \
+ if (flags & SECCOMP_FILTER_FLAG_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~SECCOMP_FILTER_FLAG_##n; \
+ }
+
+ P_FLAG(TSYNC);
+#undef P_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_SECCOMP_FLAGS syscall_arg__scnprintf_seccomp_flags
diff --git a/tools/perf/trace/beauty/signum.c b/tools/perf/trace/beauty/signum.c
new file mode 100644
index 000000000000..d3b0b1fab077
--- /dev/null
+++ b/tools/perf/trace/beauty/signum.c
@@ -0,0 +1,53 @@
+
+static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg)
+{
+ int sig = arg->val;
+
+ switch (sig) {
+#define P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, #n)
+ P_SIGNUM(HUP);
+ P_SIGNUM(INT);
+ P_SIGNUM(QUIT);
+ P_SIGNUM(ILL);
+ P_SIGNUM(TRAP);
+ P_SIGNUM(ABRT);
+ P_SIGNUM(BUS);
+ P_SIGNUM(FPE);
+ P_SIGNUM(KILL);
+ P_SIGNUM(USR1);
+ P_SIGNUM(SEGV);
+ P_SIGNUM(USR2);
+ P_SIGNUM(PIPE);
+ P_SIGNUM(ALRM);
+ P_SIGNUM(TERM);
+ P_SIGNUM(CHLD);
+ P_SIGNUM(CONT);
+ P_SIGNUM(STOP);
+ P_SIGNUM(TSTP);
+ P_SIGNUM(TTIN);
+ P_SIGNUM(TTOU);
+ P_SIGNUM(URG);
+ P_SIGNUM(XCPU);
+ P_SIGNUM(XFSZ);
+ P_SIGNUM(VTALRM);
+ P_SIGNUM(PROF);
+ P_SIGNUM(WINCH);
+ P_SIGNUM(IO);
+ P_SIGNUM(PWR);
+ P_SIGNUM(SYS);
+#ifdef SIGEMT
+ P_SIGNUM(EMT);
+#endif
+#ifdef SIGSTKFLT
+ P_SIGNUM(STKFLT);
+#endif
+#ifdef SIGSWI
+ P_SIGNUM(SWI);
+#endif
+ default: break;
+ }
+
+ return scnprintf(bf, size, "%#x", sig);
+}
+
+#define SCA_SIGNUM syscall_arg__scnprintf_signum
diff --git a/tools/perf/trace/beauty/socket_type.c b/tools/perf/trace/beauty/socket_type.c
new file mode 100644
index 000000000000..0a5ce818131c
--- /dev/null
+++ b/tools/perf/trace/beauty/socket_type.c
@@ -0,0 +1,60 @@
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#ifndef SOCK_DCCP
+# define SOCK_DCCP 6
+#endif
+
+#ifndef SOCK_CLOEXEC
+# define SOCK_CLOEXEC 02000000
+#endif
+
+#ifndef SOCK_NONBLOCK
+# define SOCK_NONBLOCK 00004000
+#endif
+
+#ifndef SOCK_TYPE_MASK
+#define SOCK_TYPE_MASK 0xf
+#endif
+
+static size_t syscall_arg__scnprintf_socket_type(char *bf, size_t size, struct syscall_arg *arg)
+{
+ size_t printed;
+ int type = arg->val,
+ flags = type & ~SOCK_TYPE_MASK;
+
+ type &= SOCK_TYPE_MASK;
+ /*
+ * Can't use a strarray, MIPS may override for ABI reasons.
+ */
+ switch (type) {
+#define P_SK_TYPE(n) case SOCK_##n: printed = scnprintf(bf, size, #n); break;
+ P_SK_TYPE(STREAM);
+ P_SK_TYPE(DGRAM);
+ P_SK_TYPE(RAW);
+ P_SK_TYPE(RDM);
+ P_SK_TYPE(SEQPACKET);
+ P_SK_TYPE(DCCP);
+ P_SK_TYPE(PACKET);
+#undef P_SK_TYPE
+ default:
+ printed = scnprintf(bf, size, "%#x", type);
+ }
+
+#define P_SK_FLAG(n) \
+ if (flags & SOCK_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "|%s", #n); \
+ flags &= ~SOCK_##n; \
+ }
+
+ P_SK_FLAG(CLOEXEC);
+ P_SK_FLAG(NONBLOCK);
+#undef P_SK_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "|%#x", flags);
+
+ return printed;
+}
+
+#define SCA_SK_TYPE syscall_arg__scnprintf_socket_type
diff --git a/tools/perf/trace/beauty/waitid_options.c b/tools/perf/trace/beauty/waitid_options.c
new file mode 100644
index 000000000000..7942724adec8
--- /dev/null
+++ b/tools/perf/trace/beauty/waitid_options.c
@@ -0,0 +1,26 @@
+#include <sys/types.h>
+#include <sys/wait.h>
+
+static size_t syscall_arg__scnprintf_waitid_options(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, options = arg->val;
+
+#define P_OPTION(n) \
+ if (options & W##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ options &= ~W##n; \
+ }
+
+ P_OPTION(NOHANG);
+ P_OPTION(UNTRACED);
+ P_OPTION(CONTINUED);
+#undef P_OPTION
+
+ if (options)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", options);
+
+ return printed;
+}
+
+#define SCA_WAITID_OPTIONS syscall_arg__scnprintf_waitid_options
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 2a83414159a6..538bae880bfe 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1607,9 +1607,8 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
dummy_hpp.buf[ret] = '\0';
- rtrim(dummy_hpp.buf);
- start = ltrim(dummy_hpp.buf);
+ start = trim(dummy_hpp.buf);
ret = strlen(start);
if (start != dummy_hpp.buf)
@@ -1897,11 +1896,10 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser,
bool first = true;
int ret;
- if (symbol_conf.use_callchain)
+ if (symbol_conf.use_callchain) {
folded_sign = hist_entry__folded(he);
-
- if (symbol_conf.use_callchain)
printed += fprintf(fp, "%c ", folded_sign);
+ }
hists__for_each_format(browser->hists, fmt) {
if (perf_hpp__should_skip(fmt, he->hists))
@@ -2137,7 +2135,7 @@ static int hists__browser_title(struct hists *hists,
printed += snprintf(bf + printed, size - printed,
", UID: %s", hists->uid_filter_str);
if (thread) {
- if (sort__has_thread) {
+ if (hists__has(hists, thread)) {
printed += scnprintf(bf + printed, size - printed,
", Thread: %s(%d)",
(thread->comm_set ? thread__comm_str(thread) : ""),
@@ -2322,7 +2320,8 @@ do_zoom_thread(struct hist_browser *browser, struct popup_action *act)
{
struct thread *thread = act->thread;
- if ((!sort__has_thread && !sort__has_comm) || thread == NULL)
+ if ((!hists__has(browser->hists, thread) &&
+ !hists__has(browser->hists, comm)) || thread == NULL)
return 0;
if (browser->hists->thread_filter) {
@@ -2331,7 +2330,7 @@ do_zoom_thread(struct hist_browser *browser, struct popup_action *act)
thread__zput(browser->hists->thread_filter);
ui_helpline__pop();
} else {
- if (sort__has_thread) {
+ if (hists__has(browser->hists, thread)) {
ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s(%d) thread\"",
thread->comm_set ? thread__comm_str(thread) : "",
thread->tid);
@@ -2356,10 +2355,11 @@ add_thread_opt(struct hist_browser *browser, struct popup_action *act,
{
int ret;
- if ((!sort__has_thread && !sort__has_comm) || thread == NULL)
+ if ((!hists__has(browser->hists, thread) &&
+ !hists__has(browser->hists, comm)) || thread == NULL)
return 0;
- if (sort__has_thread) {
+ if (hists__has(browser->hists, thread)) {
ret = asprintf(optstr, "Zoom %s %s(%d) thread",
browser->hists->thread_filter ? "out of" : "into",
thread->comm_set ? thread__comm_str(thread) : "",
@@ -2382,7 +2382,7 @@ do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
{
struct map *map = act->ms.map;
- if (!sort__has_dso || map == NULL)
+ if (!hists__has(browser->hists, dso) || map == NULL)
return 0;
if (browser->hists->dso_filter) {
@@ -2409,7 +2409,7 @@ static int
add_dso_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, struct map *map)
{
- if (!sort__has_dso || map == NULL)
+ if (!hists__has(browser->hists, dso) || map == NULL)
return 0;
if (asprintf(optstr, "Zoom %s %s DSO",
@@ -2431,10 +2431,10 @@ do_browse_map(struct hist_browser *browser __maybe_unused,
}
static int
-add_map_opt(struct hist_browser *browser __maybe_unused,
+add_map_opt(struct hist_browser *browser,
struct popup_action *act, char **optstr, struct map *map)
{
- if (!sort__has_dso || map == NULL)
+ if (!hists__has(browser->hists, dso) || map == NULL)
return 0;
if (asprintf(optstr, "Browse map details") < 0)
@@ -2536,7 +2536,7 @@ add_exit_opt(struct hist_browser *browser __maybe_unused,
static int
do_zoom_socket(struct hist_browser *browser, struct popup_action *act)
{
- if (!sort__has_socket || act->socket < 0)
+ if (!hists__has(browser->hists, socket) || act->socket < 0)
return 0;
if (browser->hists->socket_filter > -1) {
@@ -2558,7 +2558,7 @@ static int
add_socket_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, int socket_id)
{
- if (!sort__has_socket || socket_id < 0)
+ if (!hists__has(browser->hists, socket) || socket_id < 0)
return 0;
if (asprintf(optstr, "Zoom %s Processor Socket %d",
@@ -2749,7 +2749,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
*/
goto out_free_stack;
case 'a':
- if (!sort__has_sym) {
+ if (!hists__has(hists, sym)) {
ui_browser__warning(&browser->b, delay_secs * 2,
"Annotation is only available for symbolic views, "
"include \"sym*\" in --sort to use it.");
@@ -2912,7 +2912,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
continue;
}
- if (!sort__has_sym || browser->selection == NULL)
+ if (!hists__has(hists, sym) || browser->selection == NULL)
goto skip_annotation;
if (sort__mode == SORT_MODE__BRANCH) {
@@ -2956,7 +2956,7 @@ skip_annotation:
goto skip_scripting;
if (browser->he_selection) {
- if (sort__has_thread && thread) {
+ if (hists__has(hists, thread) && thread) {
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
@@ -2971,7 +2971,7 @@ skip_annotation:
*
* See hist_browser__show_entry.
*/
- if (sort__has_sym && browser->selection->sym) {
+ if (hists__has(hists, sym) && browser->selection->sym) {
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 2aa45b606fa4..932adfaa05af 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -379,7 +379,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
gtk_tree_store_set(store, &iter, col_idx++, s, -1);
}
- if (symbol_conf.use_callchain && sort__has_sym) {
+ if (symbol_conf.use_callchain && hists__has(hists, sym)) {
if (callchain_param.mode == CHAIN_GRAPH_REL)
total = symbol_conf.cumulate_callchain ?
h->stat_acc->period : h->stat.period;
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 3baeaa6e71b5..af07ffb129ca 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -635,7 +635,7 @@ unsigned int hists__sort_list_width(struct hists *hists)
ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
}
- if (verbose && sort__has_sym) /* Addr + origin */
+ if (verbose && hists__has(hists, sym)) /* Addr + origin */
ret += 3 + BITS_PER_LONG / 4;
return ret;
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 7aff5acf3265..560eb47d56f9 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -569,9 +569,8 @@ static int print_hierarchy_header(struct hists *hists, struct perf_hpp *hpp,
first_col = false;
fmt->header(fmt, hpp, hists_to_evsel(hists));
- rtrim(hpp->buf);
- header_width += fprintf(fp, "%s", ltrim(hpp->buf));
+ header_width += fprintf(fp, "%s", trim(hpp->buf));
}
}
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index da48fd843438..8c6c8a0ca642 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -8,6 +8,7 @@ libperf-y += env.o
libperf-y += event.o
libperf-y += evlist.o
libperf-y += evsel.o
+libperf-y += evsel_fprintf.o
libperf-y += find_bit.o
libperf-y += kallsyms.o
libperf-y += levenshtein.o
@@ -26,9 +27,9 @@ libperf-y += strlist.o
libperf-y += strfilter.o
libperf-y += top.o
libperf-y += usage.o
-libperf-y += wrapper.o
libperf-y += dso.o
libperf-y += symbol.o
+libperf-y += symbol_fprintf.o
libperf-y += color.o
libperf-y += header.o
libperf-y += callchain.o
@@ -38,6 +39,7 @@ libperf-y += machine.o
libperf-y += map.o
libperf-y += pstack.o
libperf-y += session.o
+libperf-$(CONFIG_AUDIT) += syscalltbl.o
libperf-y += ordered-events.o
libperf-y += comm.o
libperf-y += thread.o
@@ -69,9 +71,9 @@ libperf-y += stat-shadow.o
libperf-y += record.o
libperf-y += srcline.o
libperf-y += data.o
-libperf-$(CONFIG_X86) += tsc.o
-libperf-$(CONFIG_AUXTRACE) += tsc.o
+libperf-y += tsc.o
libperf-y += cloexec.o
+libperf-y += call-path.o
libperf-y += thread-stack.o
libperf-$(CONFIG_AUXTRACE) += auxtrace.o
libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index b795b6994144..4db73d5a0dbc 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1138,7 +1138,7 @@ fallback:
if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
!dso__is_kcore(dso)) {
- char bf[BUILD_ID_SIZE * 2 + 16] = " with build id ";
+ char bf[SBUILD_ID_SIZE + 15] = " with build id ";
char *build_id_msg = NULL;
if (dso->annotate_warned)
@@ -1665,5 +1665,5 @@ int hist_entry__annotate(struct hist_entry *he, size_t privsize)
bool ui__has_annotation(void)
{
- return use_browser == 1 && sort__has_sym;
+ return use_browser == 1 && perf_hpp_list.sym;
}
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index ec164fe70718..c9169011e55e 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -940,6 +940,7 @@ void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
+ synth_opts->initial_skip = 0;
}
/*
@@ -1064,6 +1065,12 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
synth_opts->last_branch_sz = val;
}
break;
+ case 's':
+ synth_opts->initial_skip = strtoul(p, &endptr, 10);
+ if (p == endptr)
+ goto out_err;
+ p = endptr;
+ break;
case ' ':
case ',':
break;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 57ff31ecb8e4..767989e0e312 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -68,6 +68,7 @@ enum itrace_period_type {
* @last_branch_sz: branch context size
* @period: 'instructions' events period
* @period_type: 'instructions' events period type
+ * @initial_skip: skip N events at the beginning.
*/
struct itrace_synth_opts {
bool set;
@@ -86,6 +87,7 @@ struct itrace_synth_opts {
unsigned int last_branch_sz;
unsigned long long period;
enum itrace_period_type period_type;
+ unsigned long initial_skip;
};
/**
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 0967ce601931..493307d1414c 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -842,6 +842,58 @@ bpf_map_op__new(struct parse_events_term *term)
return op;
}
+static struct bpf_map_op *
+bpf_map_op__clone(struct bpf_map_op *op)
+{
+ struct bpf_map_op *newop;
+
+ newop = memdup(op, sizeof(*op));
+ if (!newop) {
+ pr_debug("Failed to alloc bpf_map_op\n");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&newop->list);
+ if (op->key_type == BPF_MAP_KEY_RANGES) {
+ size_t memsz = op->k.array.nr_ranges *
+ sizeof(op->k.array.ranges[0]);
+
+ newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
+ if (!newop->k.array.ranges) {
+ pr_debug("Failed to alloc indices for map\n");
+ free(newop);
+ return NULL;
+ }
+ }
+
+ return newop;
+}
+
+static struct bpf_map_priv *
+bpf_map_priv__clone(struct bpf_map_priv *priv)
+{
+ struct bpf_map_priv *newpriv;
+ struct bpf_map_op *pos, *newop;
+
+ newpriv = zalloc(sizeof(*newpriv));
+ if (!newpriv) {
+ pr_debug("No enough memory to alloc map private\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&newpriv->ops_list);
+
+ list_for_each_entry(pos, &priv->ops_list, list) {
+ newop = bpf_map_op__clone(pos);
+ if (!newop) {
+ bpf_map_priv__purge(newpriv);
+ return NULL;
+ }
+ list_add_tail(&newop->list, &newpriv->ops_list);
+ }
+
+ return newpriv;
+}
+
static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
@@ -1417,6 +1469,89 @@ int bpf__apply_obj_config(void)
return 0;
}
+#define bpf__for_each_map(pos, obj, objtmp) \
+ bpf_object__for_each_safe(obj, objtmp) \
+ bpf_map__for_each(pos, obj)
+
+#define bpf__for_each_stdout_map(pos, obj, objtmp) \
+ bpf__for_each_map(pos, obj, objtmp) \
+ if (bpf_map__get_name(pos) && \
+ (strcmp("__bpf_stdout__", \
+ bpf_map__get_name(pos)) == 0))
+
+int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
+{
+ struct bpf_map_priv *tmpl_priv = NULL;
+ struct bpf_object *obj, *tmp;
+ struct perf_evsel *evsel = NULL;
+ struct bpf_map *map;
+ int err;
+ bool need_init = false;
+
+ bpf__for_each_stdout_map(map, obj, tmp) {
+ struct bpf_map_priv *priv;
+
+ err = bpf_map__get_private(map, (void **)&priv);
+ if (err)
+ return -BPF_LOADER_ERRNO__INTERNAL;
+
+ /*
+ * No need to check map type: type should have been
+ * verified by kernel.
+ */
+ if (!need_init && !priv)
+ need_init = !priv;
+ if (!tmpl_priv && priv)
+ tmpl_priv = priv;
+ }
+
+ if (!need_init)
+ return 0;
+
+ if (!tmpl_priv) {
+ err = parse_events(evlist, "bpf-output/no-inherit=1,name=__bpf_stdout__/",
+ NULL);
+ if (err) {
+ pr_debug("ERROR: failed to create bpf-output event\n");
+ return -err;
+ }
+
+ evsel = perf_evlist__last(evlist);
+ }
+
+ bpf__for_each_stdout_map(map, obj, tmp) {
+ struct bpf_map_priv *priv;
+
+ err = bpf_map__get_private(map, (void **)&priv);
+ if (err)
+ return -BPF_LOADER_ERRNO__INTERNAL;
+ if (priv)
+ continue;
+
+ if (tmpl_priv) {
+ priv = bpf_map_priv__clone(tmpl_priv);
+ if (!priv)
+ return -ENOMEM;
+
+ err = bpf_map__set_private(map, priv, bpf_map_priv__clear);
+ if (err) {
+ bpf_map_priv__clear(map, priv);
+ return err;
+ }
+ } else if (evsel) {
+ struct bpf_map_op *op;
+
+ op = bpf_map__add_newop(map, NULL);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
+ op->op_type = BPF_MAP_OP_SET_EVSEL;
+ op->v.evsel = evsel;
+ }
+ }
+
+ return 0;
+}
+
#define ERRNO_OFFSET(e) ((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c) ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
@@ -1590,3 +1725,11 @@ int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
bpf__strerror_end(buf, size);
return 0;
}
+
+int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
+ int err, char *buf, size_t size)
+{
+ bpf__strerror_head(err, buf, size);
+ bpf__strerror_end(buf, size);
+ return 0;
+}
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
index be4311944e3d..941e17275aa7 100644
--- a/tools/perf/util/bpf-loader.h
+++ b/tools/perf/util/bpf-loader.h
@@ -79,6 +79,11 @@ int bpf__strerror_config_obj(struct bpf_object *obj,
size_t size);
int bpf__apply_obj_config(void);
int bpf__strerror_apply_obj_config(int err, char *buf, size_t size);
+
+int bpf__setup_stdout(struct perf_evlist *evlist);
+int bpf__strerror_setup_stdout(struct perf_evlist *evlist, int err,
+ char *buf, size_t size);
+
#else
static inline struct bpf_object *
bpf__prepare_load(const char *filename __maybe_unused,
@@ -125,6 +130,12 @@ bpf__apply_obj_config(void)
}
static inline int
+bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
+{
+ return 0;
+}
+
+static inline int
__bpf_strerror(char *buf, size_t size)
{
if (!size)
@@ -177,5 +188,13 @@ bpf__strerror_apply_obj_config(int err __maybe_unused,
{
return __bpf_strerror(buf, size);
}
+
+static inline int
+bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
+ int err __maybe_unused, char *buf,
+ size_t size)
+{
+ return __bpf_strerror(buf, size);
+}
#endif
#endif
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 0573c2ec861d..bff425e1232c 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -261,14 +261,14 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
if (dso__is_vdso(pos)) {
name = pos->short_name;
- name_len = pos->short_name_len + 1;
+ name_len = pos->short_name_len;
} else if (dso__is_kcore(pos)) {
machine__mmap_name(machine, nm, sizeof(nm));
name = nm;
- name_len = strlen(nm) + 1;
+ name_len = strlen(nm);
} else {
name = pos->long_name;
- name_len = pos->long_name_len + 1;
+ name_len = pos->long_name_len;
}
in_kernel = pos->kernel ||
@@ -365,39 +365,17 @@ static char *build_id_cache__dirname_from_path(const char *name,
int build_id_cache__list_build_ids(const char *pathname,
struct strlist **result)
{
- struct strlist *list;
char *dir_name;
- DIR *dir;
- struct dirent *d;
int ret = 0;
- list = strlist__new(NULL, NULL);
dir_name = build_id_cache__dirname_from_path(pathname, false, false);
- if (!list || !dir_name) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!dir_name)
+ return -ENOMEM;
- /* List up all dirents */
- dir = opendir(dir_name);
- if (!dir) {
+ *result = lsdir(dir_name, lsdir_no_dot_filter);
+ if (!*result)
ret = -errno;
- goto out;
- }
-
- while ((d = readdir(dir)) != NULL) {
- if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
- continue;
- strlist__add(list, d->d_name);
- }
- closedir(dir);
-
-out:
free(dir_name);
- if (ret)
- strlist__delete(list);
- else
- *result = list;
return ret;
}
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 1f5a93c2c9a2..0d814bb74661 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -40,25 +40,6 @@ int split_cmdline(char *cmdline, const char ***argv);
#define alloc_nr(x) (((x)+16)*3/2)
-/*
- * Realloc the buffer pointed at by variable 'x' so that it can hold
- * at least 'nr' entries; the number of entries currently allocated
- * is 'alloc', using the standard growing factor alloc_nr() macro.
- *
- * DO NOT USE any expression with side-effect for 'x' or 'alloc'.
- */
-#define ALLOC_GROW(x, nr, alloc) \
- do { \
- if ((nr) > alloc) { \
- if (alloc_nr(alloc) < (nr)) \
- alloc = (nr); \
- else \
- alloc = alloc_nr(alloc); \
- x = xrealloc((x), alloc * sizeof(*(x))); \
- } \
- } while(0)
-
-
static inline int is_absolute_path(const char *path)
{
return path[0] == '/';
diff --git a/tools/perf/util/call-path.c b/tools/perf/util/call-path.c
new file mode 100644
index 000000000000..904a17052e38
--- /dev/null
+++ b/tools/perf/util/call-path.c
@@ -0,0 +1,122 @@
+/*
+ * call-path.h: Manipulate a tree data structure containing function call paths
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+
+#include "util.h"
+#include "call-path.h"
+
+static void call_path__init(struct call_path *cp, struct call_path *parent,
+ struct symbol *sym, u64 ip, bool in_kernel)
+{
+ cp->parent = parent;
+ cp->sym = sym;
+ cp->ip = sym ? 0 : ip;
+ cp->db_id = 0;
+ cp->in_kernel = in_kernel;
+ RB_CLEAR_NODE(&cp->rb_node);
+ cp->children = RB_ROOT;
+}
+
+struct call_path_root *call_path_root__new(void)
+{
+ struct call_path_root *cpr;
+
+ cpr = zalloc(sizeof(struct call_path_root));
+ if (!cpr)
+ return NULL;
+ call_path__init(&cpr->call_path, NULL, NULL, 0, false);
+ INIT_LIST_HEAD(&cpr->blocks);
+ return cpr;
+}
+
+void call_path_root__free(struct call_path_root *cpr)
+{
+ struct call_path_block *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &cpr->blocks, node) {
+ list_del(&pos->node);
+ free(pos);
+ }
+ free(cpr);
+}
+
+static struct call_path *call_path__new(struct call_path_root *cpr,
+ struct call_path *parent,
+ struct symbol *sym, u64 ip,
+ bool in_kernel)
+{
+ struct call_path_block *cpb;
+ struct call_path *cp;
+ size_t n;
+
+ if (cpr->next < cpr->sz) {
+ cpb = list_last_entry(&cpr->blocks, struct call_path_block,
+ node);
+ } else {
+ cpb = zalloc(sizeof(struct call_path_block));
+ if (!cpb)
+ return NULL;
+ list_add_tail(&cpb->node, &cpr->blocks);
+ cpr->sz += CALL_PATH_BLOCK_SIZE;
+ }
+
+ n = cpr->next++ & CALL_PATH_BLOCK_MASK;
+ cp = &cpb->cp[n];
+
+ call_path__init(cp, parent, sym, ip, in_kernel);
+
+ return cp;
+}
+
+struct call_path *call_path__findnew(struct call_path_root *cpr,
+ struct call_path *parent,
+ struct symbol *sym, u64 ip, u64 ks)
+{
+ struct rb_node **p;
+ struct rb_node *node_parent = NULL;
+ struct call_path *cp;
+ bool in_kernel = ip >= ks;
+
+ if (sym)
+ ip = 0;
+
+ if (!parent)
+ return call_path__new(cpr, parent, sym, ip, in_kernel);
+
+ p = &parent->children.rb_node;
+ while (*p != NULL) {
+ node_parent = *p;
+ cp = rb_entry(node_parent, struct call_path, rb_node);
+
+ if (cp->sym == sym && cp->ip == ip)
+ return cp;
+
+ if (sym < cp->sym || (sym == cp->sym && ip < cp->ip))
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ cp = call_path__new(cpr, parent, sym, ip, in_kernel);
+ if (!cp)
+ return NULL;
+
+ rb_link_node(&cp->rb_node, node_parent, p);
+ rb_insert_color(&cp->rb_node, &parent->children);
+
+ return cp;
+}
diff --git a/tools/perf/util/call-path.h b/tools/perf/util/call-path.h
new file mode 100644
index 000000000000..477f6d03b659
--- /dev/null
+++ b/tools/perf/util/call-path.h
@@ -0,0 +1,77 @@
+/*
+ * call-path.h: Manipulate a tree data structure containing function call paths
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __PERF_CALL_PATH_H
+#define __PERF_CALL_PATH_H
+
+#include <sys/types.h>
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+
+/**
+ * struct call_path - node in list of calls leading to a function call.
+ * @parent: call path to the parent function call
+ * @sym: symbol of function called
+ * @ip: only if sym is null, the ip of the function
+ * @db_id: id used for db-export
+ * @in_kernel: whether the function is in the kernel
+ * @rb_node: node in parent's tree of called functions
+ * @children: tree of call paths of functions called
+ *
+ * In combination with the call_return structure, the call_path structure
+ * defines a context-sensitive call-graph.
+ */
+struct call_path {
+ struct call_path *parent;
+ struct symbol *sym;
+ u64 ip;
+ u64 db_id;
+ bool in_kernel;
+ struct rb_node rb_node;
+ struct rb_root children;
+};
+
+#define CALL_PATH_BLOCK_SHIFT 8
+#define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT)
+#define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1)
+
+struct call_path_block {
+ struct call_path cp[CALL_PATH_BLOCK_SIZE];
+ struct list_head node;
+};
+
+/**
+ * struct call_path_root - root of all call paths.
+ * @call_path: root call path
+ * @blocks: list of blocks to store call paths
+ * @next: next free space
+ * @sz: number of spaces
+ */
+struct call_path_root {
+ struct call_path call_path;
+ struct list_head blocks;
+ size_t next;
+ size_t sz;
+};
+
+struct call_path_root *call_path_root__new(void);
+void call_path_root__free(struct call_path_root *cpr);
+
+struct call_path *call_path__findnew(struct call_path_root *cpr,
+ struct call_path *parent,
+ struct symbol *sym, u64 ip, u64 ks);
+
+#endif
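Editorial aside, not part of the patch: a hedged usage sketch of the API declared above, treating struct symbol pointers as opaque and eliding error handling, to show how call_path__findnew() grows the tree and reuses existing nodes:

#include "call-path.h"

static void example(struct symbol *sym_a, struct symbol *sym_b, u64 kernel_start)
{
	struct call_path_root *cpr = call_path_root__new();
	struct call_path *a, *b, *again;

	if (!cpr)
		return;

	/* children of the root call path, keyed by (sym, ip) */
	a = call_path__findnew(cpr, &cpr->call_path, sym_a, 0, kernel_start);
	b = call_path__findnew(cpr, a, sym_b, 0, kernel_start);
	again = call_path__findnew(cpr, a, sym_b, 0, kernel_start);

	/* 'again' is the same node as 'b': findnew reuses existing paths */
	(void)b; (void)again;

	call_path_root__free(cpr);
}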
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 24b4bd0d7754..07fd30bc2f81 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -109,6 +109,7 @@ __parse_callchain_report_opt(const char *arg, bool allow_record_opt)
bool record_opt_set = false;
bool try_stack_size = false;
+ callchain_param.enabled = true;
symbol_conf.use_callchain = true;
if (!arg)
@@ -117,6 +118,7 @@ __parse_callchain_report_opt(const char *arg, bool allow_record_opt)
while ((tok = strtok((char *)arg, ",")) != NULL) {
if (!strncmp(tok, "none", strlen(tok))) {
callchain_param.mode = CHAIN_NONE;
+ callchain_param.enabled = false;
symbol_conf.use_callchain = false;
return 0;
}
@@ -788,7 +790,8 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
return 0;
}
-int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
+int sample__resolve_callchain(struct perf_sample *sample,
+ struct callchain_cursor *cursor, struct symbol **parent,
struct perf_evsel *evsel, struct addr_location *al,
int max_stack)
{
@@ -796,8 +799,8 @@ int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent
return 0;
if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
- sort__has_parent) {
- return thread__resolve_callchain(al->thread, evsel, sample,
+ perf_hpp_list.parent) {
+ return thread__resolve_callchain(al->thread, cursor, evsel, sample,
parent, al, max_stack);
}
return 0;
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index d2a9e694810c..65e2a4f7cb4e 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -212,7 +212,14 @@ struct hist_entry;
int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
int record_callchain_opt(const struct option *opt, const char *arg, int unset);
-int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
+struct record_opts;
+
+int record_opts__parse_callchain(struct record_opts *record,
+ struct callchain_param *callchain,
+ const char *arg, bool unset);
+
+int sample__resolve_callchain(struct perf_sample *sample,
+ struct callchain_cursor *cursor, struct symbol **parent,
struct perf_evsel *evsel, struct addr_location *al,
int max_stack);
int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample);
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 4e727635476e..dad7d8272168 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -13,6 +13,7 @@
#include <subcmd/exec-cmd.h>
#include "util/hist.h" /* perf_hist_config */
#include "util/llvm-utils.h" /* perf_llvm_config */
+#include "config.h"
#define MAXNAME (256)
@@ -377,6 +378,21 @@ const char *perf_config_dirname(const char *name, const char *value)
return value;
}
+static int perf_buildid_config(const char *var, const char *value)
+{
+ /* same dir for all commands */
+ if (!strcmp(var, "buildid.dir")) {
+ const char *dir = perf_config_dirname(var, value);
+
+ if (!dir)
+ return -1;
+ strncpy(buildid_dir, dir, MAXPATHLEN-1);
+ buildid_dir[MAXPATHLEN-1] = '\0';
+ }
+
+ return 0;
+}
+
static int perf_default_core_config(const char *var __maybe_unused,
const char *value __maybe_unused)
{
@@ -412,6 +428,9 @@ int perf_default_config(const char *var, const char *value,
if (!prefixcmp(var, "llvm."))
return perf_llvm_config(var, value);
+ if (!prefixcmp(var, "buildid."))
+ return perf_buildid_config(var, value);
+
/* Add other config variables here. */
return 0;
}
@@ -506,41 +525,185 @@ out:
return ret;
}
-/*
- * Call this to report error for your variable that should not
- * get a boolean value (i.e. "[my] var" means "true").
- */
-int config_error_nonbool(const char *var)
+static struct perf_config_section *find_section(struct list_head *sections,
+ const char *section_name)
{
- return error("Missing value for '%s'", var);
+ struct perf_config_section *section;
+
+ list_for_each_entry(section, sections, node)
+ if (!strcmp(section->name, section_name))
+ return section;
+
+ return NULL;
+}
+
+static struct perf_config_item *find_config_item(const char *name,
+ struct perf_config_section *section)
+{
+ struct perf_config_item *item;
+
+ list_for_each_entry(item, &section->items, node)
+ if (!strcmp(item->name, name))
+ return item;
+
+ return NULL;
}
-struct buildid_dir_config {
- char *dir;
-};
+static struct perf_config_section *add_section(struct list_head *sections,
+ const char *section_name)
+{
+ struct perf_config_section *section = zalloc(sizeof(*section));
+
+ if (!section)
+ return NULL;
+
+ INIT_LIST_HEAD(&section->items);
+ section->name = strdup(section_name);
+ if (!section->name) {
+ pr_debug("%s: strdup failed\n", __func__);
+ free(section);
+ return NULL;
+ }
+
+ list_add_tail(&section->node, sections);
+ return section;
+}
-static int buildid_dir_command_config(const char *var, const char *value,
- void *data)
+static struct perf_config_item *add_config_item(struct perf_config_section *section,
+ const char *name)
{
- struct buildid_dir_config *c = data;
- const char *v;
+ struct perf_config_item *item = zalloc(sizeof(*item));
- /* same dir for all commands */
- if (!strcmp(var, "buildid.dir")) {
- v = perf_config_dirname(var, value);
- if (!v)
- return -1;
- strncpy(c->dir, v, MAXPATHLEN-1);
- c->dir[MAXPATHLEN-1] = '\0';
+ if (!item)
+ return NULL;
+
+ item->name = strdup(name);
+ if (!item->name) {
+ pr_debug("%s: strdup failed\n", __func__);
+ free(item);
+ return NULL;
}
+
+ list_add_tail(&item->node, &section->items);
+ return item;
+}
+
+static int set_value(struct perf_config_item *item, const char *value)
+{
+ char *val = strdup(value);
+
+ if (!val)
+ return -1;
+
+ zfree(&item->value);
+ item->value = val;
return 0;
}
-static void check_buildid_dir_config(void)
+static int collect_config(const char *var, const char *value,
+ void *perf_config_set)
{
- struct buildid_dir_config c;
- c.dir = buildid_dir;
- perf_config(buildid_dir_command_config, &c);
+ int ret = -1;
+ char *ptr, *key;
+ char *section_name, *name;
+ struct perf_config_section *section = NULL;
+ struct perf_config_item *item = NULL;
+ struct perf_config_set *set = perf_config_set;
+ struct list_head *sections = &set->sections;
+
+ key = ptr = strdup(var);
+ if (!key) {
+ pr_debug("%s: strdup failed\n", __func__);
+ return -1;
+ }
+
+ section_name = strsep(&ptr, ".");
+ name = ptr;
+ if (name == NULL || value == NULL)
+ goto out_free;
+
+ section = find_section(sections, section_name);
+ if (!section) {
+ section = add_section(sections, section_name);
+ if (!section)
+ goto out_free;
+ }
+
+ item = find_config_item(name, section);
+ if (!item) {
+ item = add_config_item(section, name);
+ if (!item)
+ goto out_free;
+ }
+
+ ret = set_value(item, value);
+ return ret;
+
+out_free:
+ free(key);
+ perf_config_set__delete(set);
+ return -1;
+}
+
+struct perf_config_set *perf_config_set__new(void)
+{
+ struct perf_config_set *set = zalloc(sizeof(*set));
+
+ if (set) {
+ INIT_LIST_HEAD(&set->sections);
+ perf_config(collect_config, set);
+ }
+
+ return set;
+}
+
+static void perf_config_item__delete(struct perf_config_item *item)
+{
+ zfree(&item->name);
+ zfree(&item->value);
+ free(item);
+}
+
+static void perf_config_section__purge(struct perf_config_section *section)
+{
+ struct perf_config_item *item, *tmp;
+
+ list_for_each_entry_safe(item, tmp, &section->items, node) {
+ list_del_init(&item->node);
+ perf_config_item__delete(item);
+ }
+}
+
+static void perf_config_section__delete(struct perf_config_section *section)
+{
+ perf_config_section__purge(section);
+ zfree(&section->name);
+ free(section);
+}
+
+static void perf_config_set__purge(struct perf_config_set *set)
+{
+ struct perf_config_section *section, *tmp;
+
+ list_for_each_entry_safe(section, tmp, &set->sections, node) {
+ list_del_init(&section->node);
+ perf_config_section__delete(section);
+ }
+}
+
+void perf_config_set__delete(struct perf_config_set *set)
+{
+ perf_config_set__purge(set);
+ free(set);
+}
+
+/*
+ * Call this to report error for your variable that should not
+ * get a boolean value (i.e. "[my] var" means "true").
+ */
+int config_error_nonbool(const char *var)
+{
+ return error("Missing value for '%s'", var);
}
void set_buildid_dir(const char *dir)
@@ -548,16 +711,13 @@ void set_buildid_dir(const char *dir)
if (dir)
scnprintf(buildid_dir, MAXPATHLEN-1, "%s", dir);
- /* try config file */
- if (buildid_dir[0] == '\0')
- check_buildid_dir_config();
-
/* default to $HOME/.debug */
if (buildid_dir[0] == '\0') {
- char *v = getenv("HOME");
- if (v) {
+ char *home = getenv("HOME");
+
+ if (home) {
snprintf(buildid_dir, MAXPATHLEN-1, "%s/%s",
- v, DEBUG_CACHE_DIR);
+ home, DEBUG_CACHE_DIR);
} else {
strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1);
}
diff --git a/tools/perf/util/config.h b/tools/perf/util/config.h
new file mode 100644
index 000000000000..22ec626ac718
--- /dev/null
+++ b/tools/perf/util/config.h
@@ -0,0 +1,26 @@
+#ifndef __PERF_CONFIG_H
+#define __PERF_CONFIG_H
+
+#include <stdbool.h>
+#include <linux/list.h>
+
+struct perf_config_item {
+ char *name;
+ char *value;
+ struct list_head node;
+};
+
+struct perf_config_section {
+ char *name;
+ struct list_head items;
+ struct list_head node;
+};
+
+struct perf_config_set {
+ struct list_head sections;
+};
+
+struct perf_config_set *perf_config_set__new(void);
+void perf_config_set__delete(struct perf_config_set *set);
+
+#endif /* __PERF_CONFIG_H */
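The new perf_config_set groups every "section.name" key collected by collect_config() into a list of sections, each holding a list of items. Below is a minimal traversal sketch, assuming only the structures declared above and the list_for_each_entry() helper from <linux/list.h>; the dump_config() name and the include path are illustrative, not part of the patch:

#include <stdio.h>
#include <linux/list.h>
#include "config.h"		/* the new tools/perf/util/config.h */

static void dump_config(struct perf_config_set *set)
{
	struct perf_config_section *section;
	struct perf_config_item *item;

	/* Print every collected key back in "section.name = value" form. */
	list_for_each_entry(section, &set->sections, node)
		list_for_each_entry(item, &section->items, node)
			printf("%s.%s = %s\n", section->name,
			       item->name, item->value);
}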
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 9bcf2bed3a6d..02d801670f30 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -587,3 +587,15 @@ int cpu__setup_cpunode_map(void)
closedir(dir1);
return 0;
}
+
+bool cpu_map__has(struct cpu_map *cpus, int cpu)
+{
+ int i;
+
+ for (i = 0; i < cpus->nr; ++i) {
+ if (cpus->map[i] == cpu)
+ return true;
+ }
+
+ return false;
+}
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 81a2562aaa2b..1a0a35073ce1 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -66,4 +66,6 @@ int cpu__get_node(int cpu);
int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
int (*f)(struct cpu_map *map, int cpu, void *data),
void *data);
+
+bool cpu_map__has(struct cpu_map *cpus, int cpu);
#endif /* __PERF_CPUMAP_H */
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 1921942fc2e0..be83516155ee 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -136,3 +136,44 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
{
return writen(file->fd, buf, size);
}
+
+int perf_data_file__switch(struct perf_data_file *file,
+ const char *postfix,
+ size_t pos, bool at_exit)
+{
+ char *new_filepath;
+ int ret;
+
+ if (check_pipe(file))
+ return -EINVAL;
+ if (perf_data_file__is_read(file))
+ return -EINVAL;
+
+ if (asprintf(&new_filepath, "%s.%s", file->path, postfix) < 0)
+ return -ENOMEM;
+
+ /*
+	 * Only fire a warning, don't return an error; continue filling
+	 * the original file.
+ */
+ if (rename(file->path, new_filepath))
+ pr_warning("Failed to rename %s to %s\n", file->path, new_filepath);
+
+ if (!at_exit) {
+ close(file->fd);
+ ret = perf_data_file__open(file);
+ if (ret < 0)
+ goto out;
+
+ if (lseek(file->fd, pos, SEEK_SET) == (off_t)-1) {
+ ret = -errno;
+ pr_debug("Failed to lseek to %zu: %s",
+ pos, strerror(errno));
+ goto out;
+ }
+ }
+ ret = file->fd;
+out:
+ free(new_filepath);
+ return ret;
+}
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 2b15d0c95c7f..ae510ce16cb1 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -46,5 +46,14 @@ int perf_data_file__open(struct perf_data_file *file);
void perf_data_file__close(struct perf_data_file *file);
ssize_t perf_data_file__write(struct perf_data_file *file,
void *buf, size_t size);
-
+/*
+ * If at_exit is set, only rename the current perf.data to
+ * perf.data.<postfix> and continue writing to the original file.
+ * Set at_exit when flushing the last output.
+ *
+ * The return value is the fd of the new output file.
+ */
+int perf_data_file__switch(struct perf_data_file *file,
+ const char *postfix,
+ size_t pos, bool at_exit);
#endif /* __PERF_DATA_H */
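A usage sketch for perf_data_file__switch() along the lines of the comment above; the postfix string, the header_size value and the error handling are illustrative assumptions, not taken from the patch:

	/* Rotate perf.data away and keep recording into a fresh file. */
	int fd = perf_data_file__switch(file, "2016051601",
					header_size,	/* bytes already reserved for the header */
					false);		/* not the final flush: reopen the output */
	if (fd < 0)
		pr_err("failed to switch perf.data: %d\n", fd);
	/* otherwise keep writing through fd (== file->fd) */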
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 049438d51b9a..8d96c80cc67e 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -23,6 +23,8 @@
#include "event.h"
#include "util.h"
#include "thread-stack.h"
+#include "callchain.h"
+#include "call-path.h"
#include "db-export.h"
struct deferred_export {
@@ -258,8 +260,7 @@ static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
if (!al->sym) {
al->sym = symbol__new(al->addr, 0, 0, "unknown");
if (al->sym)
- symbols__insert(&dso->symbols[al->map->type],
- al->sym);
+ dso__insert_symbol(dso, al->map->type, al->sym);
}
if (al->sym) {
@@ -276,6 +277,80 @@ static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
return 0;
}
+static struct call_path *call_path_from_sample(struct db_export *dbe,
+ struct machine *machine,
+ struct thread *thread,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel)
+{
+ u64 kernel_start = machine__kernel_start(machine);
+ struct call_path *current = &dbe->cpr->call_path;
+ enum chain_order saved_order = callchain_param.order;
+ int err;
+
+ if (!symbol_conf.use_callchain || !sample->callchain)
+ return NULL;
+
+ /*
+	 * Since the call path tree must be built starting with the root, we
+	 * must use ORDER_CALLER for call chain resolution, in order to process
+	 * the callchain starting with the root node and ending with the leaf.
+ */
+ callchain_param.order = ORDER_CALLER;
+ err = thread__resolve_callchain(thread, &callchain_cursor, evsel,
+ sample, NULL, NULL,
+ sysctl_perf_event_max_stack);
+ if (err) {
+ callchain_param.order = saved_order;
+ return NULL;
+ }
+ callchain_cursor_commit(&callchain_cursor);
+
+ while (1) {
+ struct callchain_cursor_node *node;
+ struct addr_location al;
+ u64 dso_db_id = 0, sym_db_id = 0, offset = 0;
+
+ memset(&al, 0, sizeof(al));
+
+ node = callchain_cursor_current(&callchain_cursor);
+ if (!node)
+ break;
+ /*
+ * Handle export of symbol and dso for this node by
+ * constructing an addr_location struct and then passing it to
+ * db_ids_from_al() to perform the export.
+ */
+ al.sym = node->sym;
+ al.map = node->map;
+ al.machine = machine;
+ al.addr = node->ip;
+
+ if (al.map && !al.sym)
+ al.sym = dso__find_symbol(al.map->dso, MAP__FUNCTION,
+ al.addr);
+
+ db_ids_from_al(dbe, &al, &dso_db_id, &sym_db_id, &offset);
+
+ /* add node to the call path tree if it doesn't exist */
+ current = call_path__findnew(dbe->cpr, current,
+ al.sym, node->ip,
+ kernel_start);
+
+ callchain_cursor_advance(&callchain_cursor);
+ }
+
+ /* Reset the callchain order to its prior value. */
+ callchain_param.order = saved_order;
+
+ if (current == &dbe->cpr->call_path) {
+ /* Bail because the callchain was empty. */
+ return NULL;
+ }
+
+ return current;
+}
+
int db_export__branch_type(struct db_export *dbe, u32 branch_type,
const char *name)
{
@@ -329,6 +404,16 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
if (err)
goto out_put;
+ if (dbe->cpr) {
+ struct call_path *cp = call_path_from_sample(dbe, al->machine,
+ thread, sample,
+ evsel);
+ if (cp) {
+ db_export__call_path(dbe, cp);
+ es.call_path_id = cp->db_id;
+ }
+ }
+
if ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
sample_addr_correlates_sym(&evsel->attr)) {
struct addr_location addr_al;
diff --git a/tools/perf/util/db-export.h b/tools/perf/util/db-export.h
index 25e22fd76aca..67bc6b8ad2d6 100644
--- a/tools/perf/util/db-export.h
+++ b/tools/perf/util/db-export.h
@@ -27,6 +27,7 @@ struct dso;
struct perf_sample;
struct addr_location;
struct call_return_processor;
+struct call_path_root;
struct call_path;
struct call_return;
@@ -43,6 +44,7 @@ struct export_sample {
u64 addr_dso_db_id;
u64 addr_sym_db_id;
u64 addr_offset; /* addr offset from symbol start */
+ u64 call_path_id;
};
struct db_export {
@@ -64,6 +66,7 @@ struct db_export {
int (*export_call_return)(struct db_export *dbe,
struct call_return *cr);
struct call_return_processor *crp;
+ struct call_path_root *cpr;
u64 evsel_last_db_id;
u64 machine_last_db_id;
u64 thread_last_db_id;
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 8e6395439ca0..3357479082ca 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -38,7 +38,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
enum dso_binary_type type,
char *root_dir, char *filename, size_t size)
{
- char build_id_hex[BUILD_ID_SIZE * 2 + 1];
+ char build_id_hex[SBUILD_ID_SIZE];
int ret = 0;
size_t len;
@@ -1301,7 +1301,7 @@ size_t __dsos__fprintf(struct list_head *head, FILE *fp)
size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
- char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+ char sbuild_id[SBUILD_ID_SIZE];
build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
return fprintf(fp, "%s", sbuild_id);
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index aea189b41cc8..a347b19c961a 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -915,8 +915,7 @@ int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf)
tmp = "*";
else if (tag == DW_TAG_subroutine_type) {
/* Function pointer */
- strbuf_add(buf, "(function_type)", 15);
- return 0;
+ return strbuf_add(buf, "(function_type)", 15);
} else {
if (!dwarf_diename(&type))
return -ENOENT;
@@ -927,14 +926,10 @@ int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf)
else if (tag == DW_TAG_enumeration_type)
tmp = "enum ";
/* Write a base name */
- strbuf_addf(buf, "%s%s", tmp, dwarf_diename(&type));
- return 0;
+ return strbuf_addf(buf, "%s%s", tmp, dwarf_diename(&type));
}
ret = die_get_typename(&type, buf);
- if (ret == 0)
- strbuf_addstr(buf, tmp);
-
- return ret;
+ return ret ? ret : strbuf_addstr(buf, tmp);
}
/**
@@ -951,12 +946,10 @@ int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf)
ret = die_get_typename(vr_die, buf);
if (ret < 0) {
pr_debug("Failed to get type, make it unknown.\n");
- strbuf_add(buf, " (unknown_type)", 14);
+ ret = strbuf_add(buf, " (unknown_type)", 14);
}
- strbuf_addf(buf, "\t%s", dwarf_diename(vr_die));
-
- return 0;
+ return ret < 0 ? ret : strbuf_addf(buf, "\t%s", dwarf_diename(vr_die));
}
#ifdef HAVE_DWARF_GETLOCATIONS
@@ -999,22 +992,24 @@ static int die_get_var_innermost_scope(Dwarf_Die *sp_die, Dwarf_Die *vr_die,
}
while ((offset = dwarf_ranges(&scopes[1], offset, &base,
- &start, &end)) > 0) {
+ &start, &end)) > 0) {
start -= entry;
end -= entry;
if (first) {
- strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
- name, start, end);
+ ret = strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
+ name, start, end);
first = false;
} else {
- strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
- start, end);
+ ret = strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
+ start, end);
}
+ if (ret < 0)
+ goto out;
}
if (!first)
- strbuf_add(buf, "]>", 2);
+ ret = strbuf_add(buf, "]>", 2);
out:
free(scopes);
@@ -1054,31 +1049,32 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL)
return -EINVAL;
- while ((offset = dwarf_getlocations(
- &attr, offset, &base,
- &start, &end, &op, &nops)) > 0) {
+ while ((offset = dwarf_getlocations(&attr, offset, &base,
+ &start, &end, &op, &nops)) > 0) {
if (start == 0) {
/* Single Location Descriptions */
ret = die_get_var_innermost_scope(sp_die, vr_die, buf);
- return ret;
+ goto out;
}
/* Location Lists */
start -= entry;
end -= entry;
if (first) {
- strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
- name, start, end);
+ ret = strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
+ name, start, end);
first = false;
} else {
- strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
- start, end);
+ ret = strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
+ start, end);
}
+ if (ret < 0)
+ goto out;
}
if (!first)
- strbuf_add(buf, "]>", 2);
-
+ ret = strbuf_add(buf, "]>", 2);
+out:
return ret;
}
#else
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index edcf4ed4e99c..f6fcc6832949 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -45,6 +45,7 @@ static const char *perf_event__names[] = {
[PERF_RECORD_STAT] = "STAT",
[PERF_RECORD_STAT_ROUND] = "STAT_ROUND",
[PERF_RECORD_EVENT_UPDATE] = "EVENT_UPDATE",
+ [PERF_RECORD_TIME_CONV] = "TIME_CONV",
};
const char *perf_event__name(unsigned int id)
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 6bb1c928350d..8d363d5e65a2 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -233,6 +233,7 @@ enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_STAT = 76,
PERF_RECORD_STAT_ROUND = 77,
PERF_RECORD_EVENT_UPDATE = 78,
+ PERF_RECORD_TIME_CONV = 79,
PERF_RECORD_HEADER_MAX
};
@@ -469,6 +470,13 @@ struct stat_round_event {
u64 time;
};
+struct time_conv_event {
+ struct perf_event_header header;
+ u64 time_shift;
+ u64 time_mult;
+ u64 time_zero;
+};
+
union perf_event {
struct perf_event_header header;
struct mmap_event mmap;
@@ -497,6 +505,7 @@ union perf_event {
struct stat_config_event stat_config;
struct stat_event stat;
struct stat_round_event stat_round;
+ struct time_conv_event time_conv;
};
void perf_event__print_totals(void);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 86a03836a83f..c4bfe11479a0 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -679,53 +679,52 @@ static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
return NULL;
}
-union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+/* When check_messup is true, 'end' must point to a good entry */
+static union perf_event *
+perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
+ u64 end, u64 *prev)
{
- struct perf_mmap *md = &evlist->mmap[idx];
- u64 head;
- u64 old = md->prev;
unsigned char *data = md->base + page_size;
union perf_event *event = NULL;
+ int diff = end - start;
- /*
- * Check if event was unmapped due to a POLLHUP/POLLERR.
- */
- if (!atomic_read(&md->refcnt))
- return NULL;
-
- head = perf_mmap__read_head(md);
- if (evlist->overwrite) {
+ if (check_messup) {
/*
* If we're further behind than half the buffer, there's a chance
* the writer will bite our tail and mess up the samples under us.
*
- * If we somehow ended up ahead of the head, we got messed up.
+ * If we somehow ended up ahead of the 'end', we got messed up.
*
- * In either case, truncate and restart at head.
+ * In either case, truncate and restart at 'end'.
*/
- int diff = head - old;
if (diff > md->mask / 2 || diff < 0) {
fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
/*
- * head points to a known good entry, start there.
+ * 'end' points to a known good entry, start there.
*/
- old = head;
+ start = end;
+ diff = 0;
}
}
- if (old != head) {
+ if (diff >= (int)sizeof(event->header)) {
size_t size;
- event = (union perf_event *)&data[old & md->mask];
+ event = (union perf_event *)&data[start & md->mask];
size = event->header.size;
+ if (size < sizeof(event->header) || diff < (int)size) {
+ event = NULL;
+ goto broken_event;
+ }
+
/*
* Event straddles the mmap boundary -- header should always
* be inside due to u64 alignment of output.
*/
- if ((old & md->mask) + size != ((old + size) & md->mask)) {
- unsigned int offset = old;
+ if ((start & md->mask) + size != ((start + size) & md->mask)) {
+ unsigned int offset = start;
unsigned int len = min(sizeof(*event), size), cpy;
void *dst = md->event_copy;
@@ -740,14 +739,83 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
event = (union perf_event *) md->event_copy;
}
- old += size;
+ start += size;
}
- md->prev = old;
+broken_event:
+ if (prev)
+ *prev = start;
return event;
}
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+{
+ struct perf_mmap *md = &evlist->mmap[idx];
+ u64 head;
+ u64 old = md->prev;
+
+ /*
+ * Check if event was unmapped due to a POLLHUP/POLLERR.
+ */
+ if (!atomic_read(&md->refcnt))
+ return NULL;
+
+ head = perf_mmap__read_head(md);
+
+ return perf_mmap__read(md, evlist->overwrite, old, head, &md->prev);
+}
+
+union perf_event *
+perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
+{
+ struct perf_mmap *md = &evlist->mmap[idx];
+ u64 head, end;
+ u64 start = md->prev;
+
+ /*
+ * Check if event was unmapped due to a POLLHUP/POLLERR.
+ */
+ if (!atomic_read(&md->refcnt))
+ return NULL;
+
+ head = perf_mmap__read_head(md);
+ if (!head)
+ return NULL;
+
+ /*
+	 * The 'head' pointer starts from 0. The kernel subtracts
+	 * sizeof(record) from it each time it writes a record, so 'head'
+	 * is in fact negative. The 'end' pointer is made manually by
+	 * adding the size of the ring buffer to 'head', which means the
+	 * valid data we can read spans the whole ring buffer. If 'end'
+	 * is positive, the ring buffer has not been fully filled, so we
+	 * must adjust 'end' to 0.
+	 *
+	 * However, since both 'head' and 'end' are unsigned, we can't
+	 * simply compare 'end' against 0. Instead, we compare '-head'
+	 * with the size of the ring buffer, where -head is the number of
+	 * bytes the kernel has written to the ring buffer.
+ */
+ if (-head < (u64)(md->mask + 1))
+ end = 0;
+ else
+ end = head + md->mask + 1;
+
+ return perf_mmap__read(md, false, start, end, &md->prev);
+}
+
+void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
+{
+ struct perf_mmap *md = &evlist->mmap[idx];
+ u64 head;
+
+ if (!atomic_read(&md->refcnt))
+ return;
+
+ head = perf_mmap__read_head(md);
+ md->prev = head;
+}
+
static bool perf_mmap__empty(struct perf_mmap *md)
{
return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
@@ -986,26 +1054,34 @@ out_unmap:
return -1;
}
-static size_t perf_evlist__mmap_size(unsigned long pages)
+unsigned long perf_event_mlock_kb_in_pages(void)
{
- if (pages == UINT_MAX) {
- int max;
+ unsigned long pages;
+ int max;
- if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
- /*
- * Pick a once upon a time good value, i.e. things look
- * strange since we can't read a sysctl value, but lets not
- * die yet...
- */
- max = 512;
- } else {
- max -= (page_size / 1024);
- }
+ if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
+ /*
+ * Pick a once upon a time good value, i.e. things look
+		 * strange since we can't read a sysctl value, but let's not
+ * die yet...
+ */
+ max = 512;
+ } else {
+ max -= (page_size / 1024);
+ }
- pages = (max * 1024) / page_size;
- if (!is_power_of_2(pages))
- pages = rounddown_pow_of_two(pages);
- } else if (!is_power_of_2(pages))
+ pages = (max * 1024) / page_size;
+ if (!is_power_of_2(pages))
+ pages = rounddown_pow_of_two(pages);
+
+ return pages;
+}
+
+static size_t perf_evlist__mmap_size(unsigned long pages)
+{
+ if (pages == UINT_MAX)
+ pages = perf_event_mlock_kb_in_pages();
+ else if (!is_power_of_2(pages))
return 0;
return (pages + 1) * page_size;
@@ -1192,6 +1268,24 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
perf_evlist__propagate_maps(evlist);
}
+void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
+ enum perf_event_sample_format bit)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel)
+ __perf_evsel__set_sample_bit(evsel, bit);
+}
+
+void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
+ enum perf_event_sample_format bit)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel)
+ __perf_evsel__reset_sample_bit(evsel, bit);
+}
+
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
{
struct perf_evsel *evsel;
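The 'end' computation for the backward ring buffer (see the comment in perf_evlist__mmap_read_backward() above) can be restated as a standalone helper; this is only an illustration of the arithmetic, not code from the patch:

	static u64 backward_read_end(u64 head, u64 mask)
	{
		u64 size = mask + 1;	/* size of the mmap'ed data area */

		/*
		 * -head is the number of bytes the kernel has written so far.
		 * Before the buffer wraps, the valid data is [head, 0), so the
		 * scan stops at 0; once it has wrapped, the oldest valid byte
		 * sits one buffer size above head.
		 */
		if ((u64)(-head) < size)
			return 0;
		return head + size;
	}

For example, with a 4096-byte data area (mask == 4095) and three 24-byte records written, head == (u64)-72; since -head == 72 < 4096, end == 0 and perf_mmap__read() walks just the 72 written bytes.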
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index a0d15221db6e..85d1b59802e8 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -87,6 +87,17 @@ int perf_evlist__add_dummy(struct perf_evlist *evlist);
int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler);
+void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
+ enum perf_event_sample_format bit);
+void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
+ enum perf_event_sample_format bit);
+
+#define perf_evlist__set_sample_bit(evlist, bit) \
+ __perf_evlist__set_sample_bit(evlist, PERF_SAMPLE_##bit)
+
+#define perf_evlist__reset_sample_bit(evlist, bit) \
+ __perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit)
+
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid);
int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);
@@ -118,16 +129,23 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
+union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
+ int idx);
+void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);
+
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__close(struct perf_evlist *evlist);
+struct callchain_param;
+
void perf_evlist__set_id_pos(struct perf_evlist *evlist);
bool perf_can_sample_identifier(void);
bool perf_can_record_switch_events(void);
bool perf_can_record_cpu_wide(void);
-void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts);
+void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
+ struct callchain_param *callchain);
int record_opts__config(struct record_opts *opts);
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
@@ -144,6 +162,8 @@ int perf_evlist__parse_mmap_pages(const struct option *opt,
const char *str,
int unset);
+unsigned long perf_event_mlock_kb_in_pages(void);
+
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
bool overwrite, unsigned int auxtrace_pages,
bool auxtrace_overwrite);
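The new evlist-wide wrappers mirror the per-evsel __perf_evsel__set_sample_bit() helpers and simply paste PERF_SAMPLE_ onto the bit name; a brief usage sketch (the particular bits chosen here are only examples):

	/* Request timestamps and CPU for every event in the list... */
	perf_evlist__set_sample_bit(evlist, TIME);	/* -> PERF_SAMPLE_TIME */
	perf_evlist__set_sample_bit(evlist, CPU);	/* -> PERF_SAMPLE_CPU */

	/* ...and drop identifiers again if they are not wanted. */
	perf_evlist__reset_sample_bit(evlist, IDENTIFIER);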
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 645dc1828836..964c7c3602c0 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -226,7 +226,8 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
perf_evsel__init(evsel, attr, idx);
if (perf_evsel__is_bpf_output(evsel)) {
- evsel->attr.sample_type |= PERF_SAMPLE_RAW;
+ evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
evsel->attr.sample_period = 1;
}
@@ -561,10 +562,9 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
return ret;
}
-static void
-perf_evsel__config_callgraph(struct perf_evsel *evsel,
- struct record_opts *opts,
- struct callchain_param *param)
+void perf_evsel__config_callchain(struct perf_evsel *evsel,
+ struct record_opts *opts,
+ struct callchain_param *param)
{
bool function = perf_evsel__is_function_event(evsel);
struct perf_event_attr *attr = &evsel->attr;
@@ -704,7 +704,7 @@ static void apply_config_terms(struct perf_evsel *evsel,
/* set perf-event callgraph */
if (param.enabled)
- perf_evsel__config_callgraph(evsel, opts, &param);
+ perf_evsel__config_callchain(evsel, opts, &param);
}
}
@@ -736,7 +736,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
* enable/disable events specifically, as there's no
* initial traced exec call.
*/
-void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
+void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
+ struct callchain_param *callchain)
{
struct perf_evsel *leader = evsel->leader;
struct perf_event_attr *attr = &evsel->attr;
@@ -811,8 +812,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
if (perf_evsel__is_function_event(evsel))
evsel->attr.exclude_callchain_user = 1;
- if (callchain_param.enabled && !evsel->no_aux_samples)
- perf_evsel__config_callgraph(evsel, opts, &callchain_param);
+ if (callchain && callchain->enabled && !evsel->no_aux_samples)
+ perf_evsel__config_callchain(evsel, opts, callchain);
if (opts->sample_intr_regs) {
attr->sample_regs_intr = opts->sample_intr_regs;
@@ -1230,6 +1231,21 @@ static void __p_sample_type(char *buf, size_t size, u64 value)
__p_bits(buf, size, value, bits);
}
+static void __p_branch_sample_type(char *buf, size_t size, u64 value)
+{
+#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
+ struct bit_names bits[] = {
+ bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
+ bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
+ bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
+ bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
+ bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
+ { .name = NULL, }
+ };
+#undef bit_name
+ __p_bits(buf, size, value, bits);
+}
+
static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
@@ -1248,6 +1264,7 @@ static void __p_read_format(char *buf, size_t size, u64 value)
#define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
+#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
#define PRINT_ATTRn(_n, _f, _p) \
@@ -1299,12 +1316,13 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(comm_exec, p_unsigned);
PRINT_ATTRf(use_clockid, p_unsigned);
PRINT_ATTRf(context_switch, p_unsigned);
+ PRINT_ATTRf(write_backward, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
PRINT_ATTRf(bp_type, p_unsigned);
PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
- PRINT_ATTRf(branch_sample_type, p_unsigned);
+ PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
PRINT_ATTRf(sample_regs_user, p_hex);
PRINT_ATTRf(sample_stack_user, p_unsigned);
PRINT_ATTRf(clockid, p_signed);
@@ -2253,95 +2271,6 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
return 0;
}
-static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
-{
- va_list args;
- int ret = 0;
-
- if (!*first) {
- ret += fprintf(fp, ",");
- } else {
- ret += fprintf(fp, ":");
- *first = false;
- }
-
- va_start(args, fmt);
- ret += vfprintf(fp, fmt, args);
- va_end(args);
- return ret;
-}
-
-static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
-{
- return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
-}
-
-int perf_evsel__fprintf(struct perf_evsel *evsel,
- struct perf_attr_details *details, FILE *fp)
-{
- bool first = true;
- int printed = 0;
-
- if (details->event_group) {
- struct perf_evsel *pos;
-
- if (!perf_evsel__is_group_leader(evsel))
- return 0;
-
- if (evsel->nr_members > 1)
- printed += fprintf(fp, "%s{", evsel->group_name ?: "");
-
- printed += fprintf(fp, "%s", perf_evsel__name(evsel));
- for_each_group_member(pos, evsel)
- printed += fprintf(fp, ",%s", perf_evsel__name(pos));
-
- if (evsel->nr_members > 1)
- printed += fprintf(fp, "}");
- goto out;
- }
-
- printed += fprintf(fp, "%s", perf_evsel__name(evsel));
-
- if (details->verbose) {
- printed += perf_event_attr__fprintf(fp, &evsel->attr,
- __print_attr__fprintf, &first);
- } else if (details->freq) {
- const char *term = "sample_freq";
-
- if (!evsel->attr.freq)
- term = "sample_period";
-
- printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
- term, (u64)evsel->attr.sample_freq);
- }
-
- if (details->trace_fields) {
- struct format_field *field;
-
- if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
- printed += comma_fprintf(fp, &first, " (not a tracepoint)");
- goto out;
- }
-
- field = evsel->tp_format->format.fields;
- if (field == NULL) {
- printed += comma_fprintf(fp, &first, " (no trace field)");
- goto out;
- }
-
- printed += comma_fprintf(fp, &first, " trace_fields: %s", field->name);
-
- field = field->next;
- while (field) {
- printed += comma_fprintf(fp, &first, "%s", field->name);
- field = field->next;
- }
- }
-out:
- fputc('\n', fp);
- return ++printed;
-}
-
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
char *msg, size_t msgsize)
{
@@ -2416,10 +2345,18 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
"Probably the maximum number of open file descriptors has been reached.\n"
"Hint: Try again after reducing the number of events.\n"
"Hint: Try increasing the limit with 'ulimit -n <limit>'");
+ case ENOMEM:
+ if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0 &&
+ access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
+ return scnprintf(msg, size,
+ "Not enough memory to setup event with callchain.\n"
+ "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
+ "Hint: Current value: %d", sysctl_perf_event_max_stack);
+ break;
case ENODEV:
if (target->cpu_list)
return scnprintf(msg, size, "%s",
- "No such device - did you specify an out-of-range profile CPU?\n");
+ "No such device - did you specify an out-of-range profile CPU?");
break;
case EOPNOTSUPP:
if (evsel->attr.precise_ip)
@@ -2451,7 +2388,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
return scnprintf(msg, size,
"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
"/bin/dmesg may provide additional information.\n"
- "No CONFIG_PERF_EVENTS=y kernel support configured?\n",
+ "No CONFIG_PERF_EVENTS=y kernel support configured?",
err, strerror_r(err, sbuf, sizeof(sbuf)),
perf_evsel__name(evsel));
}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 501ea6e565f1..8a644fef452c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -178,8 +178,14 @@ void perf_evsel__init(struct perf_evsel *evsel,
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);
+struct callchain_param;
+
void perf_evsel__config(struct perf_evsel *evsel,
- struct record_opts *opts);
+ struct record_opts *opts,
+ struct callchain_param *callchain);
+void perf_evsel__config_callchain(struct perf_evsel *evsel,
+ struct record_opts *opts,
+ struct callchain_param *callchain);
int __perf_evsel__sample_size(u64 sample_type);
void perf_evsel__calc_id_pos(struct perf_evsel *evsel);
@@ -381,6 +387,24 @@ struct perf_attr_details {
int perf_evsel__fprintf(struct perf_evsel *evsel,
struct perf_attr_details *details, FILE *fp);
+#define EVSEL__PRINT_IP (1<<0)
+#define EVSEL__PRINT_SYM (1<<1)
+#define EVSEL__PRINT_DSO (1<<2)
+#define EVSEL__PRINT_SYMOFFSET (1<<3)
+#define EVSEL__PRINT_ONELINE (1<<4)
+#define EVSEL__PRINT_SRCLINE (1<<5)
+#define EVSEL__PRINT_UNKNOWN_AS_ADDR (1<<6)
+
+struct callchain_cursor;
+
+int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
+ unsigned int print_opts,
+ struct callchain_cursor *cursor, FILE *fp);
+
+int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
+ int left_alignment, unsigned int print_opts,
+ struct callchain_cursor *cursor, FILE *fp);
+
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
char *msg, size_t msgsize);
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
@@ -396,7 +420,7 @@ for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \
(_evsel) && (_evsel)->leader == (_leader); \
(_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
-static inline bool has_branch_callstack(struct perf_evsel *evsel)
+static inline bool perf_evsel__has_branch_callstack(const struct perf_evsel *evsel)
{
return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
}
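The EVSEL__PRINT_* flags above are meant to be OR-ed into the print_opts argument of the new sample__fprintf_*() helpers. A hedged sketch, assuming sample, al and callchain_cursor have already been resolved by the caller:

	unsigned int print_opts = EVSEL__PRINT_IP | EVSEL__PRINT_SYM |
				  EVSEL__PRINT_SYMOFFSET | EVSEL__PRINT_DSO;

	/* One line per callchain entry (or just the sample ip if cursor is NULL). */
	sample__fprintf_sym(sample, al, 8 /* left alignment */, print_opts,
			    &callchain_cursor, stdout);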
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
new file mode 100644
index 000000000000..3674e77ad640
--- /dev/null
+++ b/tools/perf/util/evsel_fprintf.c
@@ -0,0 +1,212 @@
+#include <stdio.h>
+#include <stdbool.h>
+#include <traceevent/event-parse.h>
+#include "evsel.h"
+#include "callchain.h"
+#include "map.h"
+#include "symbol.h"
+
+static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (!*first) {
+ ret += fprintf(fp, ",");
+ } else {
+ ret += fprintf(fp, ":");
+ *first = false;
+ }
+
+ va_start(args, fmt);
+ ret += vfprintf(fp, fmt, args);
+ va_end(args);
+ return ret;
+}
+
+static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
+{
+ return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
+}
+
+int perf_evsel__fprintf(struct perf_evsel *evsel,
+ struct perf_attr_details *details, FILE *fp)
+{
+ bool first = true;
+ int printed = 0;
+
+ if (details->event_group) {
+ struct perf_evsel *pos;
+
+ if (!perf_evsel__is_group_leader(evsel))
+ return 0;
+
+ if (evsel->nr_members > 1)
+ printed += fprintf(fp, "%s{", evsel->group_name ?: "");
+
+ printed += fprintf(fp, "%s", perf_evsel__name(evsel));
+ for_each_group_member(pos, evsel)
+ printed += fprintf(fp, ",%s", perf_evsel__name(pos));
+
+ if (evsel->nr_members > 1)
+ printed += fprintf(fp, "}");
+ goto out;
+ }
+
+ printed += fprintf(fp, "%s", perf_evsel__name(evsel));
+
+ if (details->verbose) {
+ printed += perf_event_attr__fprintf(fp, &evsel->attr,
+ __print_attr__fprintf, &first);
+ } else if (details->freq) {
+ const char *term = "sample_freq";
+
+ if (!evsel->attr.freq)
+ term = "sample_period";
+
+ printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
+ term, (u64)evsel->attr.sample_freq);
+ }
+
+ if (details->trace_fields) {
+ struct format_field *field;
+
+ if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
+ printed += comma_fprintf(fp, &first, " (not a tracepoint)");
+ goto out;
+ }
+
+ field = evsel->tp_format->format.fields;
+ if (field == NULL) {
+ printed += comma_fprintf(fp, &first, " (no trace field)");
+ goto out;
+ }
+
+ printed += comma_fprintf(fp, &first, " trace_fields: %s", field->name);
+
+ field = field->next;
+ while (field) {
+ printed += comma_fprintf(fp, &first, "%s", field->name);
+ field = field->next;
+ }
+ }
+out:
+ fputc('\n', fp);
+ return ++printed;
+}
+
+int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
+ unsigned int print_opts, struct callchain_cursor *cursor,
+ FILE *fp)
+{
+ int printed = 0;
+ struct callchain_cursor_node *node;
+ int print_ip = print_opts & EVSEL__PRINT_IP;
+ int print_sym = print_opts & EVSEL__PRINT_SYM;
+ int print_dso = print_opts & EVSEL__PRINT_DSO;
+ int print_symoffset = print_opts & EVSEL__PRINT_SYMOFFSET;
+ int print_oneline = print_opts & EVSEL__PRINT_ONELINE;
+ int print_srcline = print_opts & EVSEL__PRINT_SRCLINE;
+ int print_unknown_as_addr = print_opts & EVSEL__PRINT_UNKNOWN_AS_ADDR;
+ char s = print_oneline ? ' ' : '\t';
+
+ if (sample->callchain) {
+ struct addr_location node_al;
+
+ callchain_cursor_commit(cursor);
+
+ while (1) {
+ u64 addr = 0;
+
+ node = callchain_cursor_current(cursor);
+ if (!node)
+ break;
+
+ if (node->sym && node->sym->ignore)
+ goto next;
+
+ printed += fprintf(fp, "%-*.*s", left_alignment, left_alignment, " ");
+
+ if (print_ip)
+ printed += fprintf(fp, "%c%16" PRIx64, s, node->ip);
+
+ if (node->map)
+ addr = node->map->map_ip(node->map, node->ip);
+
+ if (print_sym) {
+ printed += fprintf(fp, " ");
+ node_al.addr = addr;
+ node_al.map = node->map;
+
+ if (print_symoffset) {
+ printed += __symbol__fprintf_symname_offs(node->sym, &node_al,
+ print_unknown_as_addr, fp);
+ } else {
+ printed += __symbol__fprintf_symname(node->sym, &node_al,
+ print_unknown_as_addr, fp);
+ }
+ }
+
+ if (print_dso) {
+ printed += fprintf(fp, " (");
+ printed += map__fprintf_dsoname(node->map, fp);
+ printed += fprintf(fp, ")");
+ }
+
+ if (print_srcline)
+ printed += map__fprintf_srcline(node->map, addr, "\n ", fp);
+
+ if (!print_oneline)
+ printed += fprintf(fp, "\n");
+next:
+ callchain_cursor_advance(cursor);
+ }
+ }
+
+ return printed;
+}
+
+int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
+ int left_alignment, unsigned int print_opts,
+ struct callchain_cursor *cursor, FILE *fp)
+{
+ int printed = 0;
+ int print_ip = print_opts & EVSEL__PRINT_IP;
+ int print_sym = print_opts & EVSEL__PRINT_SYM;
+ int print_dso = print_opts & EVSEL__PRINT_DSO;
+ int print_symoffset = print_opts & EVSEL__PRINT_SYMOFFSET;
+ int print_srcline = print_opts & EVSEL__PRINT_SRCLINE;
+ int print_unknown_as_addr = print_opts & EVSEL__PRINT_UNKNOWN_AS_ADDR;
+
+ if (cursor != NULL) {
+ printed += sample__fprintf_callchain(sample, left_alignment,
+ print_opts, cursor, fp);
+ } else if (!(al->sym && al->sym->ignore)) {
+ printed += fprintf(fp, "%-*.*s", left_alignment, left_alignment, " ");
+
+ if (print_ip)
+ printed += fprintf(fp, "%16" PRIx64, sample->ip);
+
+ if (print_sym) {
+ printed += fprintf(fp, " ");
+ if (print_symoffset) {
+ printed += __symbol__fprintf_symname_offs(al->sym, al,
+ print_unknown_as_addr, fp);
+ } else {
+ printed += __symbol__fprintf_symname(al->sym, al,
+ print_unknown_as_addr, fp);
+ }
+ }
+
+ if (print_dso) {
+ printed += fprintf(fp, " (");
+ printed += map__fprintf_dsoname(al->map, fp);
+ printed += fprintf(fp, ")");
+ }
+
+ if (print_srcline)
+ printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
+ }
+
+ return printed;
+}
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 90680ec9f8b8..08852dde1378 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1474,7 +1474,7 @@ static int __event_process_build_id(struct build_id_event *bev,
dso = machine__findnew_dso(machine, filename);
if (dso != NULL) {
- char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+ char sbuild_id[SBUILD_ID_SIZE];
dso__set_build_id(dso, &bev->build_id);
@@ -1819,7 +1819,8 @@ static int process_cpu_topology(struct perf_file_section *section,
ph->env.nr_sibling_cores = nr;
size += sizeof(u32);
- strbuf_init(&sb, 128);
+ if (strbuf_init(&sb, 128) < 0)
+ goto free_cpu;
for (i = 0; i < nr; i++) {
str = do_read_string(fd, ph);
@@ -1827,7 +1828,8 @@ static int process_cpu_topology(struct perf_file_section *section,
goto error;
/* include a NULL character at the end */
- strbuf_add(&sb, str, strlen(str) + 1);
+ if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
+ goto error;
size += string_size(str);
free(str);
}
@@ -1849,7 +1851,8 @@ static int process_cpu_topology(struct perf_file_section *section,
goto error;
/* include a NULL character at the end */
- strbuf_add(&sb, str, strlen(str) + 1);
+ if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
+ goto error;
size += string_size(str);
free(str);
}
@@ -1912,13 +1915,14 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
/* nr nodes */
ret = readn(fd, &nr, sizeof(nr));
if (ret != sizeof(nr))
- goto error;
+ return -1;
if (ph->needs_swap)
nr = bswap_32(nr);
ph->env.nr_numa_nodes = nr;
- strbuf_init(&sb, 256);
+ if (strbuf_init(&sb, 256) < 0)
+ return -1;
for (i = 0; i < nr; i++) {
/* node number */
@@ -1940,15 +1944,17 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
mem_free = bswap_64(mem_free);
}
- strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
- node, mem_total, mem_free);
+ if (strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
+ node, mem_total, mem_free) < 0)
+ goto error;
str = do_read_string(fd, ph);
if (!str)
goto error;
/* include a NULL character at the end */
- strbuf_add(&sb, str, strlen(str) + 1);
+ if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
+ goto error;
free(str);
}
ph->env.numa_nodes = strbuf_detach(&sb, NULL);
@@ -1982,7 +1988,8 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused
}
ph->env.nr_pmu_mappings = pmu_num;
- strbuf_init(&sb, 128);
+ if (strbuf_init(&sb, 128) < 0)
+ return -1;
while (pmu_num) {
if (readn(fd, &type, sizeof(type)) != sizeof(type))
@@ -1994,9 +2001,11 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused
if (!name)
goto error;
- strbuf_addf(&sb, "%u:%s", type, name);
+ if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
+ goto error;
/* include a NULL character at the end */
- strbuf_add(&sb, "", 1);
+ if (strbuf_add(&sb, "", 1) < 0)
+ goto error;
if (!strcmp(name, "msr"))
ph->env.msr_pmu_type = type;
diff --git a/tools/perf/util/help-unknown-cmd.c b/tools/perf/util/help-unknown-cmd.c
index 43a98a4dc1e1..d62ccaeeadd6 100644
--- a/tools/perf/util/help-unknown-cmd.c
+++ b/tools/perf/util/help-unknown-cmd.c
@@ -27,16 +27,27 @@ static int levenshtein_compare(const void *p1, const void *p2)
return l1 != l2 ? l1 - l2 : strcmp(s1, s2);
}
-static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
+static int add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
{
- unsigned int i;
-
- ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc);
-
+ unsigned int i, nr = cmds->cnt + old->cnt;
+ void *tmp;
+
+ if (nr > cmds->alloc) {
+		/* Choose the bigger of the two to allocate */
+ if (alloc_nr(cmds->alloc) < nr)
+ cmds->alloc = nr;
+ else
+ cmds->alloc = alloc_nr(cmds->alloc);
+ tmp = realloc(cmds->names, cmds->alloc * sizeof(*cmds->names));
+ if (!tmp)
+ return -1;
+ cmds->names = tmp;
+ }
for (i = 0; i < old->cnt; i++)
cmds->names[cmds->cnt++] = old->names[i];
zfree(&old->names);
old->cnt = 0;
+ return 0;
}
const char *help_unknown_cmd(const char *cmd)
@@ -52,8 +63,11 @@ const char *help_unknown_cmd(const char *cmd)
load_command_list("perf-", &main_cmds, &other_cmds);
- add_cmd_list(&main_cmds, &aliases);
- add_cmd_list(&main_cmds, &other_cmds);
+ if (add_cmd_list(&main_cmds, &aliases) < 0 ||
+ add_cmd_list(&main_cmds, &other_cmds) < 0) {
+ fprintf(stderr, "ERROR: Failed to allocate command list for unknown command.\n");
+ goto end;
+ }
qsort(main_cmds.names, main_cmds.cnt,
sizeof(main_cmds.names), cmdname_compare);
uniq(&main_cmds);
@@ -99,6 +113,6 @@ const char *help_unknown_cmd(const char *cmd)
for (i = 0; i < n; i++)
fprintf(stderr, "\t%s\n", main_cmds.names[i]->name);
}
-
+end:
exit(1);
}
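The open-coded growth in add_cmd_list() keeps the old ALLOC_GROW policy: take whichever is larger, the geometric step or the size actually needed. Assuming perf inherits git's alloc_nr(x) == (((x) + 16) * 3 / 2), the decision reduces to the sketch below (not code from the patch):

	static unsigned int grow_alloc(unsigned int alloc, unsigned int needed)
	{
		unsigned int step = alloc_nr(alloc);	/* e.g. 16 -> 48, 48 -> 96 */

		return step < needed ? needed : step;
	}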
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 31c4641fe5ff..cfab531437c7 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -295,7 +295,7 @@ static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
root_in = &he->parent_he->hroot_in;
root_out = &he->parent_he->hroot_out;
} else {
- if (sort__need_collapse)
+ if (hists__has(hists, need_collapse))
root_in = &hists->entries_collapsed;
else
root_in = hists->entries_in;
@@ -953,7 +953,7 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
{
int err, err2;
- err = sample__resolve_callchain(iter->sample, &iter->parent,
+ err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
iter->evsel, al, max_stack_depth);
if (err)
return err;
@@ -1295,8 +1295,9 @@ static int hists__hierarchy_insert_entry(struct hists *hists,
return ret;
}
-int hists__collapse_insert_entry(struct hists *hists, struct rb_root *root,
- struct hist_entry *he)
+static int hists__collapse_insert_entry(struct hists *hists,
+ struct rb_root *root,
+ struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
@@ -1372,7 +1373,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
struct hist_entry *n;
int ret;
- if (!sort__need_collapse)
+ if (!hists__has(hists, need_collapse))
return 0;
hists->nr_entries = 0;
@@ -1631,7 +1632,7 @@ static void output_resort(struct hists *hists, struct ui_progress *prog,
return;
}
- if (sort__need_collapse)
+ if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
@@ -2035,7 +2036,7 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
struct hist_entry *he;
int64_t cmp;
- if (sort__need_collapse)
+ if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
@@ -2061,6 +2062,8 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
if (he) {
memset(&he->stat, 0, sizeof(he->stat));
he->hists = hists;
+ if (symbol_conf.cumulate_callchain)
+ memset(he->stat_acc, 0, sizeof(he->stat));
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, root);
hists__inc_stats(hists, he);
@@ -2075,7 +2078,7 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
{
struct rb_node *n;
- if (sort__need_collapse)
+ if (hists__has(hists, need_collapse))
n = hists->entries_collapsed.rb_node;
else
n = hists->entries_in->rb_node;
@@ -2104,7 +2107,7 @@ void hists__match(struct hists *leader, struct hists *other)
struct rb_node *nd;
struct hist_entry *pos, *pair;
- if (sort__need_collapse)
+ if (hists__has(leader, need_collapse))
root = &leader->entries_collapsed;
else
root = leader->entries_in;
@@ -2129,7 +2132,7 @@ int hists__link(struct hists *leader, struct hists *other)
struct rb_node *nd;
struct hist_entry *pos, *pair;
- if (sort__need_collapse)
+ if (hists__has(other, need_collapse))
root = &other->entries_collapsed;
else
root = other->entries_in;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index bec0cd660fbd..0f84bfb42bb1 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -82,6 +82,8 @@ struct hists {
int nr_hpp_node;
};
+#define hists__has(__h, __f) (__h)->hpp_list->__f
+
struct hist_entry_iter;
struct hist_iter_ops {
@@ -199,8 +201,6 @@ int hists__init(void);
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list);
struct rb_root *hists__get_rotate_entries_in(struct hists *hists);
-int hists__collapse_insert_entry(struct hists *hists,
- struct rb_root *root, struct hist_entry *he);
struct perf_hpp {
char *buf;
@@ -240,6 +240,14 @@ struct perf_hpp_fmt {
struct perf_hpp_list {
struct list_head fields;
struct list_head sorts;
+
+ int need_collapse;
+ int parent;
+ int sym;
+ int dso;
+ int socket;
+ int thread;
+ int comm;
};
extern struct perf_hpp_list perf_hpp_list;
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index abf1366e2a24..9df996085563 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -66,6 +66,7 @@ struct intel_bts {
u64 branches_id;
size_t branches_event_size;
bool synth_needs_swap;
+ unsigned long num_events;
};
struct intel_bts_queue {
@@ -275,6 +276,10 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
union perf_event event;
struct perf_sample sample = { .ip = 0, };
+ if (bts->synth_opts.initial_skip &&
+ bts->num_events++ <= bts->synth_opts.initial_skip)
+ return 0;
+
event.sample.header.type = PERF_RECORD_SAMPLE;
event.sample.header.misc = PERF_RECORD_MISC_USER;
event.sample.header.size = sizeof(struct perf_event_header);
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 9409d014b46c..9c8f15da86ce 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -356,7 +356,7 @@ static const char *intel_pt_err_msgs[] = {
int intel_pt__strerror(int code, char *buf, size_t buflen)
{
- if (code < 1 || code > INTEL_PT_ERR_MAX)
+ if (code < 1 || code >= INTEL_PT_ERR_MAX)
code = INTEL_PT_ERR_UNK;
strlcpy(buf, intel_pt_err_msgs[code], buflen);
return 0;
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 617578440989..137196990012 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -100,6 +100,8 @@ struct intel_pt {
u64 cyc_bit;
u64 noretcomp_bit;
unsigned max_non_turbo_ratio;
+
+ unsigned long num_events;
};
enum switch_state {
@@ -972,6 +974,10 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
return 0;
+ if (pt->synth_opts.initial_skip &&
+ pt->num_events++ < pt->synth_opts.initial_skip)
+ return 0;
+
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = PERF_RECORD_MISC_USER;
event->sample.header.size = sizeof(struct perf_event_header);
@@ -1029,6 +1035,10 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
+ if (pt->synth_opts.initial_skip &&
+ pt->num_events++ < pt->synth_opts.initial_skip)
+ return 0;
+
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = PERF_RECORD_MISC_USER;
event->sample.header.size = sizeof(struct perf_event_header);
@@ -1087,6 +1097,10 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
+ if (pt->synth_opts.initial_skip &&
+ pt->num_events++ < pt->synth_opts.initial_skip)
+ return 0;
+
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = PERF_RECORD_MISC_USER;
event->sample.header.size = sizeof(struct perf_event_header);
@@ -1199,14 +1213,18 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
ptq->have_sample = false;
if (pt->sample_instructions &&
- (state->type & INTEL_PT_INSTRUCTION)) {
+ (state->type & INTEL_PT_INSTRUCTION) &&
+ (!pt->synth_opts.initial_skip ||
+ pt->num_events++ >= pt->synth_opts.initial_skip)) {
err = intel_pt_synth_instruction_sample(ptq);
if (err)
return err;
}
if (pt->sample_transactions &&
- (state->type & INTEL_PT_TRANSACTION)) {
+ (state->type & INTEL_PT_TRANSACTION) &&
+ (!pt->synth_opts.initial_skip ||
+ pt->num_events++ >= pt->synth_opts.initial_skip)) {
err = intel_pt_synth_transaction_sample(ptq);
if (err)
return err;
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index ad0c0bb1fbc7..86afe9618bb0 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -17,6 +17,7 @@
#include "strlist.h"
#include <elf.h>
+#include "tsc.h"
#include "session.h"
#include "jit.h"
#include "jitdump.h"
@@ -33,6 +34,7 @@ struct jit_buf_desc {
size_t bufsize;
FILE *in;
	bool		 needs_bswap;	/* handles cross-endianness */
+ bool use_arch_timestamp;
void *debug_data;
size_t nr_debug_entries;
uint32_t code_load_count;
@@ -158,13 +160,16 @@ jit_open(struct jit_buf_desc *jd, const char *name)
header.flags = bswap_64(header.flags);
}
+ jd->use_arch_timestamp = header.flags & JITDUMP_FLAGS_ARCH_TIMESTAMP;
+
if (verbose > 2)
- pr_debug("version=%u\nhdr.size=%u\nts=0x%llx\npid=%d\nelf_mach=%d\n",
+ pr_debug("version=%u\nhdr.size=%u\nts=0x%llx\npid=%d\nelf_mach=%d\nuse_arch_timestamp=%d\n",
header.version,
header.total_size,
(unsigned long long)header.timestamp,
header.pid,
- header.elf_mach);
+ header.elf_mach,
+ jd->use_arch_timestamp);
if (header.flags & JITDUMP_FLAGS_RESERVED) {
pr_err("jitdump file contains invalid or unsupported flags 0x%llx\n",
@@ -172,10 +177,15 @@ jit_open(struct jit_buf_desc *jd, const char *name)
goto error;
}
+ if (jd->use_arch_timestamp && !jd->session->time_conv.time_mult) {
+ pr_err("jitdump file uses arch timestamps but there is no timestamp conversion\n");
+ goto error;
+ }
+
/*
* validate event is using the correct clockid
*/
- if (jit_validate_events(jd->session)) {
+ if (!jd->use_arch_timestamp && jit_validate_events(jd->session)) {
pr_err("error, jitted code must be sampled with perf record -k 1\n");
goto error;
}
@@ -329,6 +339,23 @@ jit_inject_event(struct jit_buf_desc *jd, union perf_event *event)
return 0;
}
+static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
+{
+ struct perf_tsc_conversion tc;
+
+ if (!jd->use_arch_timestamp)
+ return timestamp;
+
+ tc.time_shift = jd->session->time_conv.time_shift;
+ tc.time_mult = jd->session->time_conv.time_mult;
+ tc.time_zero = jd->session->time_conv.time_zero;
+
+ if (!tc.time_mult)
+ return 0;
+
+ return tsc_to_perf_time(timestamp, &tc);
+}
+
static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
{
struct perf_sample sample;
@@ -385,7 +412,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
return -1;
}
if (stat(filename, &st))
- memset(&st, 0, sizeof(stat));
+ memset(&st, 0, sizeof(st));
event->mmap2.header.type = PERF_RECORD_MMAP2;
event->mmap2.header.misc = PERF_RECORD_MISC_USER;
@@ -410,7 +437,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
id->tid = tid;
}
if (jd->sample_type & PERF_SAMPLE_TIME)
- id->time = jr->load.p.timestamp;
+ id->time = convert_timestamp(jd, jr->load.p.timestamp);
/*
* create pseudo sample to induce dso hit increment
@@ -473,7 +500,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
size++; /* for \0 */
if (stat(filename, &st))
- memset(&st, 0, sizeof(stat));
+ memset(&st, 0, sizeof(st));
size = PERF_ALIGN(size, sizeof(u64));
@@ -499,7 +526,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
id->tid = tid;
}
if (jd->sample_type & PERF_SAMPLE_TIME)
- id->time = jr->load.p.timestamp;
+ id->time = convert_timestamp(jd, jr->load.p.timestamp);
/*
* create pseudo sample to induce dso hit increment
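convert_timestamp() leans on tsc_to_perf_time() from util/tsc.h together with the new time_conv fields. The real implementation splits the multiply to avoid overflow, but the underlying formula is roughly the following sketch (overflow handling omitted, names only illustrative):

	static u64 tsc_to_perf_time_sketch(u64 tsc, struct perf_tsc_conversion *tc)
	{
		/* perf time ~= time_zero + (tsc * time_mult) >> time_shift */
		return tc->time_zero + ((tsc * tc->time_mult) >> tc->time_shift);
	}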
diff --git a/tools/perf/util/jitdump.h b/tools/perf/util/jitdump.h
index b66c1f503d9e..bcacd20d0c1c 100644
--- a/tools/perf/util/jitdump.h
+++ b/tools/perf/util/jitdump.h
@@ -23,9 +23,12 @@
#define JITHEADER_VERSION 1
enum jitdump_flags_bits {
+ JITDUMP_FLAGS_ARCH_TIMESTAMP_BIT,
JITDUMP_FLAGS_MAX_BIT,
};
+#define JITDUMP_FLAGS_ARCH_TIMESTAMP (1ULL << JITDUMP_FLAGS_ARCH_TIMESTAMP_BIT)
+
#define JITDUMP_FLAGS_RESERVED (JITDUMP_FLAGS_MAX_BIT < 64 ? \
(~((1ULL << JITDUMP_FLAGS_MAX_BIT) - 1)) : 0)
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 80b9b6a87990..639a2903065e 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -32,6 +32,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
machine->threads = RB_ROOT;
pthread_rwlock_init(&machine->threads_lock, NULL);
+ machine->nr_threads = 0;
INIT_LIST_HEAD(&machine->dead_threads);
machine->last_match = NULL;
@@ -430,6 +431,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
*/
thread__get(th);
machine->last_match = th;
+ ++machine->nr_threads;
}
return th;
@@ -681,11 +683,13 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
- size_t ret = 0;
+ size_t ret;
struct rb_node *nd;
pthread_rwlock_rdlock(&machine->threads_lock);
+ ret = fprintf(fp, "Threads: %u\n", machine->nr_threads);
+
for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
@@ -908,11 +912,11 @@ int machines__create_kernel_maps(struct machines *machines, pid_t pid)
return machine__create_kernel_maps(machine);
}
-int machine__load_kallsyms(struct machine *machine, const char *filename,
- enum map_type type, symbol_filter_t filter)
+int __machine__load_kallsyms(struct machine *machine, const char *filename,
+ enum map_type type, bool no_kcore, symbol_filter_t filter)
{
struct map *map = machine__kernel_map(machine);
- int ret = dso__load_kallsyms(map->dso, filename, map, filter);
+ int ret = __dso__load_kallsyms(map->dso, filename, map, no_kcore, filter);
if (ret > 0) {
dso__set_loaded(map->dso, type);
@@ -927,6 +931,12 @@ int machine__load_kallsyms(struct machine *machine, const char *filename,
return ret;
}
+int machine__load_kallsyms(struct machine *machine, const char *filename,
+ enum map_type type, symbol_filter_t filter)
+{
+ return __machine__load_kallsyms(machine, filename, type, false, filter);
+}
+
int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
symbol_filter_t filter)
{
@@ -1413,6 +1423,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
pthread_rwlock_wrlock(&machine->threads_lock);
rb_erase_init(&th->rb_node, &machine->threads);
RB_CLEAR_NODE(&th->rb_node);
+ --machine->nr_threads;
/*
* Move it first to the dead_threads list, then drop the reference,
* if this is the last reference, then the thread__delete destructor
@@ -1599,6 +1610,7 @@ struct mem_info *sample__resolve_mem(struct perf_sample *sample,
}
static int add_callchain_ip(struct thread *thread,
+ struct callchain_cursor *cursor,
struct symbol **parent,
struct addr_location *root_al,
u8 *cpumode,
@@ -1630,7 +1642,7 @@ static int add_callchain_ip(struct thread *thread,
* It seems the callchain is corrupted.
* Discard all.
*/
- callchain_cursor_reset(&callchain_cursor);
+ callchain_cursor_reset(cursor);
return 1;
}
return 0;
@@ -1640,7 +1652,7 @@ static int add_callchain_ip(struct thread *thread,
}
if (al.sym != NULL) {
- if (sort__has_parent && !*parent &&
+ if (perf_hpp_list.parent && !*parent &&
symbol__match_regex(al.sym, &parent_regex))
*parent = al.sym;
else if (have_ignore_callees && root_al &&
@@ -1648,13 +1660,13 @@ static int add_callchain_ip(struct thread *thread,
/* Treat this symbol as the root,
forgetting its callees. */
*root_al = al;
- callchain_cursor_reset(&callchain_cursor);
+ callchain_cursor_reset(cursor);
}
}
if (symbol_conf.hide_unresolved && al.sym == NULL)
return 0;
- return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
+ return callchain_cursor_append(cursor, al.addr, al.map, al.sym);
}
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
@@ -1724,6 +1736,7 @@ static int remove_loops(struct branch_entry *l, int nr)
* negative error code on other errors.
*/
static int resolve_lbr_callchain_sample(struct thread *thread,
+ struct callchain_cursor *cursor,
struct perf_sample *sample,
struct symbol **parent,
struct addr_location *root_al,
@@ -1756,7 +1769,7 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
*/
int mix_chain_nr = i + 1 + lbr_nr + 1;
- if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) {
+ if (mix_chain_nr > (int)sysctl_perf_event_max_stack + PERF_MAX_BRANCH_DEPTH) {
pr_warning("corrupted callchain. skipping...\n");
return 0;
}
@@ -1778,7 +1791,7 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
ip = lbr_stack->entries[0].to;
}
- err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
+ err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);
if (err)
return (err < 0) ? err : 0;
}
@@ -1789,6 +1802,7 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
}
static int thread__resolve_callchain_sample(struct thread *thread,
+ struct callchain_cursor *cursor,
struct perf_evsel *evsel,
struct perf_sample *sample,
struct symbol **parent,
@@ -1803,10 +1817,8 @@ static int thread__resolve_callchain_sample(struct thread *thread,
int skip_idx = -1;
int first_call = 0;
- callchain_cursor_reset(&callchain_cursor);
-
- if (has_branch_callstack(evsel)) {
- err = resolve_lbr_callchain_sample(thread, sample, parent,
+ if (perf_evsel__has_branch_callstack(evsel)) {
+ err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
root_al, max_stack);
if (err)
return (err < 0) ? err : 0;
@@ -1816,7 +1828,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
* Based on DWARF debug information, some architectures skip
* a callchain entry saved by the kernel.
*/
- if (chain->nr < PERF_MAX_STACK_DEPTH)
+ if (chain->nr < sysctl_perf_event_max_stack)
skip_idx = arch_skip_callchain_idx(thread, chain);
/*
@@ -1863,10 +1875,10 @@ static int thread__resolve_callchain_sample(struct thread *thread,
nr = remove_loops(be, nr);
for (i = 0; i < nr; i++) {
- err = add_callchain_ip(thread, parent, root_al,
+ err = add_callchain_ip(thread, cursor, parent, root_al,
NULL, be[i].to);
if (!err)
- err = add_callchain_ip(thread, parent, root_al,
+ err = add_callchain_ip(thread, cursor, parent, root_al,
NULL, be[i].from);
if (err == -EINVAL)
break;
@@ -1877,7 +1889,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
}
check_calls:
- if (chain->nr > PERF_MAX_STACK_DEPTH && (int)chain->nr > max_stack) {
+ if (chain->nr > sysctl_perf_event_max_stack && (int)chain->nr > max_stack) {
pr_warning("corrupted callchain. skipping...\n");
return 0;
}
@@ -1896,7 +1908,7 @@ check_calls:
#endif
ip = chain->ips[j];
- err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
+ err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);
if (err)
return (err < 0) ? err : 0;
@@ -1915,19 +1927,12 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
entry->map, entry->sym);
}
-int thread__resolve_callchain(struct thread *thread,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct symbol **parent,
- struct addr_location *root_al,
- int max_stack)
+static int thread__resolve_callchain_unwind(struct thread *thread,
+ struct callchain_cursor *cursor,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ int max_stack)
{
- int ret = thread__resolve_callchain_sample(thread, evsel,
- sample, parent,
- root_al, max_stack);
- if (ret)
- return ret;
-
/* Can we do dwarf post unwind? */
if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
(evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
@@ -1938,9 +1943,45 @@ int thread__resolve_callchain(struct thread *thread,
(!sample->user_stack.size))
return 0;
- return unwind__get_entries(unwind_entry, &callchain_cursor,
+ return unwind__get_entries(unwind_entry, cursor,
thread, sample, max_stack);
+}
+
+int thread__resolve_callchain(struct thread *thread,
+ struct callchain_cursor *cursor,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct symbol **parent,
+ struct addr_location *root_al,
+ int max_stack)
+{
+ int ret = 0;
+
+ callchain_cursor_reset(&callchain_cursor);
+ if (callchain_param.order == ORDER_CALLEE) {
+ ret = thread__resolve_callchain_sample(thread, cursor,
+ evsel, sample,
+ parent, root_al,
+ max_stack);
+ if (ret)
+ return ret;
+ ret = thread__resolve_callchain_unwind(thread, cursor,
+ evsel, sample,
+ max_stack);
+ } else {
+ ret = thread__resolve_callchain_unwind(thread, cursor,
+ evsel, sample,
+ max_stack);
+ if (ret)
+ return ret;
+ ret = thread__resolve_callchain_sample(thread, cursor,
+ evsel, sample,
+ parent, root_al,
+ max_stack);
+ }
+
+ return ret;
}
int machine__for_each_thread(struct machine *machine,
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 8499db281158..83f46790c52f 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -31,6 +31,7 @@ struct machine {
char *root_dir;
struct rb_root threads;
pthread_rwlock_t threads_lock;
+ unsigned int nr_threads;
struct list_head dead_threads;
struct thread *last_match;
struct vdso_info *vdso_info;
@@ -141,7 +142,11 @@ struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
struct addr_location *al);
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
struct addr_location *al);
+
+struct callchain_cursor;
+
int thread__resolve_callchain(struct thread *thread,
+ struct callchain_cursor *cursor,
struct perf_evsel *evsel,
struct perf_sample *sample,
struct symbol **parent,
@@ -211,6 +216,8 @@ struct symbol *machine__find_kernel_function_by_name(struct machine *machine,
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
const char *filename);
+int __machine__load_kallsyms(struct machine *machine, const char *filename,
+ enum map_type type, bool no_kcore, symbol_filter_t filter);
int machine__load_kallsyms(struct machine *machine, const char *filename,
enum map_type type, symbol_filter_t filter);
int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 171b6d10a04b..b19bcd3b7128 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -289,7 +289,7 @@ int map__load(struct map *map, symbol_filter_t filter)
nr = dso__load(map->dso, map, filter);
if (nr < 0) {
if (map->dso->has_build_id) {
- char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+ char sbuild_id[SBUILD_ID_SIZE];
build_id__sprintf(map->dso->build_id,
sizeof(map->dso->build_id),
@@ -431,6 +431,13 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
if (map->dso->rel)
return rip - map->pgoff;
+ /*
+ * kernel modules also have DSO_TYPE_USER in dso->kernel,
+ * but all kernel modules are ET_REL, so they won't get here.
+ */
+ if (map->dso->kernel == DSO_TYPE_USER)
+ return rip + map->dso->text_offset;
+
return map->unmap_ip(map, rip) - map->reloc;
}
@@ -454,6 +461,13 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
if (map->dso->rel)
return map->unmap_ip(map, ip + map->pgoff);
+ /*
+ * kernel modules also have DSO_TYPE_USER in dso->kernel,
+ * but all kernel modules are ET_REL, so they won't get here.
+ */
+ if (map->dso->kernel == DSO_TYPE_USER)
+ return map->unmap_ip(map, ip - map->dso->text_offset);
+
return ip + map->reloc;
}
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index b1b9e2385f4b..fe84df1875aa 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -308,3 +308,12 @@ void ordered_events__free(struct ordered_events *oe)
free(event);
}
}
+
+void ordered_events__reinit(struct ordered_events *oe)
+{
+ ordered_events__deliver_t old_deliver = oe->deliver;
+
+ ordered_events__free(oe);
+ memset(oe, '\0', sizeof(*oe));
+ ordered_events__init(oe, old_deliver);
+}
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
index f403991e3bfd..e11468a9a6e4 100644
--- a/tools/perf/util/ordered-events.h
+++ b/tools/perf/util/ordered-events.h
@@ -49,6 +49,7 @@ void ordered_events__delete(struct ordered_events *oe, struct ordered_event *eve
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how);
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver);
void ordered_events__free(struct ordered_events *oe);
+void ordered_events__reinit(struct ordered_events *oe);
static inline
void ordered_events__set_alloc_size(struct ordered_events *oe, u64 size)
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index adef23b1352e..ddb0261b2577 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -602,14 +602,13 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
static __u64 pmu_format_max_value(const unsigned long *format)
{
- int w;
+ __u64 w = 0;
+ int fbit;
- w = bitmap_weight(format, PERF_PMU_FORMAT_BITS);
- if (!w)
- return 0;
- if (w < 64)
- return (1ULL << w) - 1;
- return -1;
+ for_each_set_bit(fbit, format, PERF_PMU_FORMAT_BITS)
+ w |= (1ULL << fbit);
+
+ return w;
}
/*
@@ -644,20 +643,20 @@ static int pmu_resolve_param_term(struct parse_events_term *term,
static char *pmu_formats_string(struct list_head *formats)
{
struct perf_pmu_format *format;
- char *str;
- struct strbuf buf;
+ char *str = NULL;
+ struct strbuf buf = STRBUF_INIT;
unsigned i = 0;
if (!formats)
return NULL;
- strbuf_init(&buf, 0);
/* sysfs exported terms */
list_for_each_entry(format, formats, list)
- strbuf_addf(&buf, i++ ? ",%s" : "%s",
- format->name);
+ if (strbuf_addf(&buf, i++ ? ",%s" : "%s", format->name) < 0)
+ goto error;
str = strbuf_detach(&buf, NULL);
+error:
strbuf_release(&buf);
return str;
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 8319fbb08636..74401a20106d 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -265,6 +265,65 @@ static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
return true;
}
+/*
+ * NOTE:
+ * The '.gnu.linkonce.this_module' section of a kernel module ELF maps
+ * directly to 'struct module' from linux/module.h. This section contains
+ * the actual module name that the kernel will use after loading it.
+ * But we cannot use 'struct module' here since linux/module.h is not
+ * exposed to user space. The offset of 'name' has stayed the same for a
+ * long time, so it is hardcoded here.
+ */
+#ifdef __LP64__
+#define MOD_NAME_OFFSET 24
+#else
+#define MOD_NAME_OFFSET 12
+#endif
+
+/*
+ * @module can be a module name or a module file path. In case of a path,
+ * inspect the ELF to find out the actual module name.
+ * The caller has to free mod_name after using it.
+ */
+static char *find_module_name(const char *module)
+{
+ int fd;
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ Elf_Data *data;
+ Elf_Scn *sec;
+ char *mod_name = NULL;
+
+ fd = open(module, O_RDONLY);
+ if (fd < 0)
+ return NULL;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL)
+ goto elf_err;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ goto ret_err;
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".gnu.linkonce.this_module", NULL);
+ if (!sec)
+ goto ret_err;
+
+ data = elf_getdata(sec, NULL);
+ if (!data || !data->d_buf)
+ goto ret_err;
+
+ mod_name = strdup((char *)data->d_buf + MOD_NAME_OFFSET);
+
+ret_err:
+ elf_end(elf);
+elf_err:
+ close(fd);
+ return mod_name;
+}
+
#ifdef HAVE_DWARF_SUPPORT
static int kernel_get_module_dso(const char *module, struct dso **pdso)
@@ -486,8 +545,10 @@ static int get_text_start_address(const char *exec, unsigned long *address)
return -errno;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
- if (elf == NULL)
- return -EINVAL;
+ if (elf == NULL) {
+ ret = -EINVAL;
+ goto out_close;
+ }
if (gelf_getehdr(elf, &ehdr) == NULL)
goto out;
@@ -499,6 +560,9 @@ static int get_text_start_address(const char *exec, unsigned long *address)
ret = 0;
out:
elf_end(elf);
+out_close:
+ close(fd);
+
return ret;
}
@@ -583,32 +647,23 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
int ntevs, const char *module)
{
int i, ret = 0;
- char *tmp;
+ char *mod_name = NULL;
if (!module)
return 0;
- tmp = strrchr(module, '/');
- if (tmp) {
- /* This is a module path -- get the module name */
- module = strdup(tmp + 1);
- if (!module)
- return -ENOMEM;
- tmp = strchr(module, '.');
- if (tmp)
- *tmp = '\0';
- tmp = (char *)module; /* For free() */
- }
+ mod_name = find_module_name(module);
for (i = 0; i < ntevs; i++) {
- tevs[i].point.module = strdup(module);
+ tevs[i].point.module =
+ strdup(mod_name ? mod_name : module);
if (!tevs[i].point.module) {
ret = -ENOMEM;
break;
}
}
- free(tmp);
+ free(mod_name);
return ret;
}
@@ -1618,69 +1673,65 @@ out:
}
/* Compose only probe arg */
-int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len)
+char *synthesize_perf_probe_arg(struct perf_probe_arg *pa)
{
struct perf_probe_arg_field *field = pa->field;
- int ret;
- char *tmp = buf;
+ struct strbuf buf;
+ char *ret = NULL;
+ int err;
+
+ if (strbuf_init(&buf, 64) < 0)
+ return NULL;
if (pa->name && pa->var)
- ret = e_snprintf(tmp, len, "%s=%s", pa->name, pa->var);
+ err = strbuf_addf(&buf, "%s=%s", pa->name, pa->var);
else
- ret = e_snprintf(tmp, len, "%s", pa->name ? pa->name : pa->var);
- if (ret <= 0)
- goto error;
- tmp += ret;
- len -= ret;
+ err = strbuf_addstr(&buf, pa->name ?: pa->var);
+ if (err)
+ goto out;
while (field) {
if (field->name[0] == '[')
- ret = e_snprintf(tmp, len, "%s", field->name);
+ err = strbuf_addstr(&buf, field->name);
else
- ret = e_snprintf(tmp, len, "%s%s",
- field->ref ? "->" : ".", field->name);
- if (ret <= 0)
- goto error;
- tmp += ret;
- len -= ret;
+ err = strbuf_addf(&buf, "%s%s", field->ref ? "->" : ".",
+ field->name);
field = field->next;
+ if (err)
+ goto out;
}
- if (pa->type) {
- ret = e_snprintf(tmp, len, ":%s", pa->type);
- if (ret <= 0)
- goto error;
- tmp += ret;
- len -= ret;
- }
+ if (pa->type)
+ if (strbuf_addf(&buf, ":%s", pa->type) < 0)
+ goto out;
- return tmp - buf;
-error:
- pr_debug("Failed to synthesize perf probe argument: %d\n", ret);
+ ret = strbuf_detach(&buf, NULL);
+out:
+ strbuf_release(&buf);
return ret;
}
/* Compose only probe point (not argument) */
static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
{
- char *buf, *tmp;
- char offs[32] = "", line[32] = "", file[32] = "";
- int ret, len;
+ struct strbuf buf;
+ char *tmp, *ret = NULL;
+ int len, err = 0;
- buf = zalloc(MAX_CMDLEN);
- if (buf == NULL) {
- ret = -ENOMEM;
- goto error;
- }
- if (pp->offset) {
- ret = e_snprintf(offs, 32, "+%lu", pp->offset);
- if (ret <= 0)
- goto error;
- }
- if (pp->line) {
- ret = e_snprintf(line, 32, ":%d", pp->line);
- if (ret <= 0)
- goto error;
+ if (strbuf_init(&buf, 64) < 0)
+ return NULL;
+
+ if (pp->function) {
+ if (strbuf_addstr(&buf, pp->function) < 0)
+ goto out;
+ if (pp->offset)
+ err = strbuf_addf(&buf, "+%lu", pp->offset);
+ else if (pp->line)
+ err = strbuf_addf(&buf, ":%d", pp->line);
+ else if (pp->retprobe)
+ err = strbuf_addstr(&buf, "%return");
+ if (err)
+ goto out;
}
if (pp->file) {
tmp = pp->file;
@@ -1689,25 +1740,15 @@ static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
tmp = strchr(pp->file + len - 30, '/');
tmp = tmp ? tmp + 1 : pp->file + len - 30;
}
- ret = e_snprintf(file, 32, "@%s", tmp);
- if (ret <= 0)
- goto error;
+ err = strbuf_addf(&buf, "@%s", tmp);
+ if (!err && !pp->function && pp->line)
+ err = strbuf_addf(&buf, ":%d", pp->line);
}
-
- if (pp->function)
- ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s%s", pp->function,
- offs, pp->retprobe ? "%return" : "", line,
- file);
- else
- ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", file, line);
- if (ret <= 0)
- goto error;
-
- return buf;
-error:
- pr_debug("Failed to synthesize perf probe point: %d\n", ret);
- free(buf);
- return NULL;
+ if (!err)
+ ret = strbuf_detach(&buf, NULL);
+out:
+ strbuf_release(&buf);
+ return ret;
}
#if 0
@@ -1736,45 +1777,32 @@ char *synthesize_perf_probe_command(struct perf_probe_event *pev)
#endif
static int __synthesize_probe_trace_arg_ref(struct probe_trace_arg_ref *ref,
- char **buf, size_t *buflen,
- int depth)
+ struct strbuf *buf, int depth)
{
- int ret;
+ int err;
if (ref->next) {
depth = __synthesize_probe_trace_arg_ref(ref->next, buf,
- buflen, depth + 1);
+ depth + 1);
if (depth < 0)
- goto out;
- }
-
- ret = e_snprintf(*buf, *buflen, "%+ld(", ref->offset);
- if (ret < 0)
- depth = ret;
- else {
- *buf += ret;
- *buflen -= ret;
+ return depth;
}
-out:
- return depth;
-
+ err = strbuf_addf(buf, "%+ld(", ref->offset);
+ return (err < 0) ? err : depth;
}
static int synthesize_probe_trace_arg(struct probe_trace_arg *arg,
- char *buf, size_t buflen)
+ struct strbuf *buf)
{
struct probe_trace_arg_ref *ref = arg->ref;
- int ret, depth = 0;
- char *tmp = buf;
+ int depth = 0, err;
/* Argument name or separator */
if (arg->name)
- ret = e_snprintf(buf, buflen, " %s=", arg->name);
+ err = strbuf_addf(buf, " %s=", arg->name);
else
- ret = e_snprintf(buf, buflen, " ");
- if (ret < 0)
- return ret;
- buf += ret;
- buflen -= ret;
+ err = strbuf_addch(buf, ' ');
+ if (err)
+ return err;
/* Special case: @XXX */
if (arg->value[0] == '@' && arg->ref)
@@ -1782,59 +1810,44 @@ static int synthesize_probe_trace_arg(struct probe_trace_arg *arg,
/* Dereferencing arguments */
if (ref) {
- depth = __synthesize_probe_trace_arg_ref(ref, &buf,
- &buflen, 1);
+ depth = __synthesize_probe_trace_arg_ref(ref, buf, 1);
if (depth < 0)
return depth;
}
/* Print argument value */
if (arg->value[0] == '@' && arg->ref)
- ret = e_snprintf(buf, buflen, "%s%+ld", arg->value,
- arg->ref->offset);
+ err = strbuf_addf(buf, "%s%+ld", arg->value, arg->ref->offset);
else
- ret = e_snprintf(buf, buflen, "%s", arg->value);
- if (ret < 0)
- return ret;
- buf += ret;
- buflen -= ret;
+ err = strbuf_addstr(buf, arg->value);
/* Closing */
- while (depth--) {
- ret = e_snprintf(buf, buflen, ")");
- if (ret < 0)
- return ret;
- buf += ret;
- buflen -= ret;
- }
+ while (!err && depth--)
+ err = strbuf_addch(buf, ')');
+
/* Print argument type */
- if (arg->type) {
- ret = e_snprintf(buf, buflen, ":%s", arg->type);
- if (ret <= 0)
- return ret;
- buf += ret;
- }
+ if (!err && arg->type)
+ err = strbuf_addf(buf, ":%s", arg->type);
- return buf - tmp;
+ return err;
}
char *synthesize_probe_trace_command(struct probe_trace_event *tev)
{
struct probe_trace_point *tp = &tev->point;
- char *buf;
- int i, len, ret;
+ struct strbuf buf;
+ char *ret = NULL;
+ int i, err;
- buf = zalloc(MAX_CMDLEN);
- if (buf == NULL)
+ /* Uprobes must have tp->module */
+ if (tev->uprobes && !tp->module)
return NULL;
- len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s ", tp->retprobe ? 'r' : 'p',
- tev->group, tev->event);
- if (len <= 0)
- goto error;
+ if (strbuf_init(&buf, 32) < 0)
+ return NULL;
- /* Uprobes must have tp->module */
- if (tev->uprobes && !tp->module)
+ if (strbuf_addf(&buf, "%c:%s/%s ", tp->retprobe ? 'r' : 'p',
+ tev->group, tev->event) < 0)
goto error;
/*
* If tp->address == 0, then this point must be a
@@ -1849,34 +1862,25 @@ char *synthesize_probe_trace_command(struct probe_trace_event *tev)
/* Use the tp->address for uprobes */
if (tev->uprobes)
- ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s:0x%lx",
- tp->module, tp->address);
+ err = strbuf_addf(&buf, "%s:0x%lx", tp->module, tp->address);
else if (!strncmp(tp->symbol, "0x", 2))
/* Absolute address. See try_to_find_absolute_address() */
- ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s%s0x%lx",
- tp->module ?: "", tp->module ? ":" : "",
- tp->address);
+ err = strbuf_addf(&buf, "%s%s0x%lx", tp->module ?: "",
+ tp->module ? ":" : "", tp->address);
else
- ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s%s%s+%lu",
- tp->module ?: "", tp->module ? ":" : "",
- tp->symbol, tp->offset);
-
- if (ret <= 0)
+ err = strbuf_addf(&buf, "%s%s%s+%lu", tp->module ?: "",
+ tp->module ? ":" : "", tp->symbol, tp->offset);
+ if (err)
goto error;
- len += ret;
- for (i = 0; i < tev->nargs; i++) {
- ret = synthesize_probe_trace_arg(&tev->args[i], buf + len,
- MAX_CMDLEN - len);
- if (ret <= 0)
+ for (i = 0; i < tev->nargs; i++)
+ if (synthesize_probe_trace_arg(&tev->args[i], &buf) < 0)
goto error;
- len += ret;
- }
- return buf;
+ ret = strbuf_detach(&buf, NULL);
error:
- free(buf);
- return NULL;
+ strbuf_release(&buf);
+ return ret;
}
static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
@@ -1958,7 +1962,7 @@ static int convert_to_perf_probe_point(struct probe_trace_point *tp,
static int convert_to_perf_probe_event(struct probe_trace_event *tev,
struct perf_probe_event *pev, bool is_kprobe)
{
- char buf[64] = "";
+ struct strbuf buf = STRBUF_INIT;
int i, ret;
/* Convert event/group name */
@@ -1981,14 +1985,15 @@ static int convert_to_perf_probe_event(struct probe_trace_event *tev,
if (tev->args[i].name)
pev->args[i].name = strdup(tev->args[i].name);
else {
- ret = synthesize_probe_trace_arg(&tev->args[i],
- buf, 64);
- pev->args[i].name = strdup(buf);
+ if ((ret = strbuf_init(&buf, 32)) < 0)
+ goto error;
+ ret = synthesize_probe_trace_arg(&tev->args[i], &buf);
+ pev->args[i].name = strbuf_detach(&buf, NULL);
}
if (pev->args[i].name == NULL && ret >= 0)
ret = -ENOMEM;
}
-
+error:
if (ret < 0)
clear_perf_probe_event(pev);
@@ -2162,35 +2167,38 @@ static int perf_probe_event__sprintf(const char *group, const char *event,
struct strbuf *result)
{
int i, ret;
- char buf[128];
- char *place;
+ char *buf;
- /* Synthesize only event probe point */
- place = synthesize_perf_probe_point(&pev->point);
- if (!place)
- return -EINVAL;
+ if (asprintf(&buf, "%s:%s", group, event) < 0)
+ return -errno;
+ ret = strbuf_addf(result, " %-20s (on ", buf);
+ free(buf);
+ if (ret)
+ return ret;
- ret = e_snprintf(buf, 128, "%s:%s", group, event);
- if (ret < 0)
- goto out;
+ /* Synthesize only event probe point */
+ buf = synthesize_perf_probe_point(&pev->point);
+ if (!buf)
+ return -ENOMEM;
+ ret = strbuf_addstr(result, buf);
+ free(buf);
- strbuf_addf(result, " %-20s (on %s", buf, place);
- if (module)
- strbuf_addf(result, " in %s", module);
+ if (!ret && module)
+ ret = strbuf_addf(result, " in %s", module);
- if (pev->nargs > 0) {
- strbuf_add(result, " with", 5);
- for (i = 0; i < pev->nargs; i++) {
- ret = synthesize_perf_probe_arg(&pev->args[i],
- buf, 128);
- if (ret < 0)
- goto out;
- strbuf_addf(result, " %s", buf);
+ if (!ret && pev->nargs > 0) {
+ ret = strbuf_add(result, " with", 5);
+ for (i = 0; !ret && i < pev->nargs; i++) {
+ buf = synthesize_perf_probe_arg(&pev->args[i]);
+ if (!buf)
+ return -ENOMEM;
+ ret = strbuf_addf(result, " %s", buf);
+ free(buf);
}
}
- strbuf_addch(result, ')');
-out:
- free(place);
+ if (!ret)
+ ret = strbuf_addch(result, ')');
+
return ret;
}
@@ -2498,7 +2506,8 @@ static int find_probe_functions(struct map *map, char *name,
void __weak arch__fix_tev_from_maps(struct perf_probe_event *pev __maybe_unused,
struct probe_trace_event *tev __maybe_unused,
- struct map *map __maybe_unused) { }
+ struct map *map __maybe_unused,
+ struct symbol *sym __maybe_unused) { }
/*
* Find probe function addresses from map.
@@ -2516,6 +2525,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
struct probe_trace_point *tp;
int num_matched_functions;
int ret, i, j, skipped = 0;
+ char *mod_name;
map = get_target_map(pev->target, pev->uprobes);
if (!map) {
@@ -2600,9 +2610,19 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
tp->realname = strdup_or_goto(sym->name, nomem_out);
tp->retprobe = pp->retprobe;
- if (pev->target)
- tev->point.module = strdup_or_goto(pev->target,
- nomem_out);
+ if (pev->target) {
+ if (pev->uprobes) {
+ tev->point.module = strdup_or_goto(pev->target,
+ nomem_out);
+ } else {
+ mod_name = find_module_name(pev->target);
+ tev->point.module =
+ strdup(mod_name ? mod_name : pev->target);
+ free(mod_name);
+ if (!tev->point.module)
+ goto nomem_out;
+ }
+ }
tev->uprobes = pev->uprobes;
tev->nargs = pev->nargs;
if (tev->nargs) {
@@ -2624,7 +2644,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
strdup_or_goto(pev->args[i].type,
nomem_out);
}
- arch__fix_tev_from_maps(pev, tev, map);
+ arch__fix_tev_from_maps(pev, tev, map, sym);
}
if (ret == skipped) {
ret = -ENOENT;
@@ -2743,9 +2763,13 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
{
int ret;
- if (pev->uprobes && !pev->group) {
- /* Replace group name if not given */
- ret = convert_exec_to_group(pev->target, &pev->group);
+ if (!pev->group) {
+ /* Set group name if not given */
+ if (!pev->uprobes) {
+ pev->group = strdup(PERFPROBE_GROUP);
+ ret = pev->group ? 0 : -ENOMEM;
+ } else
+ ret = convert_exec_to_group(pev->target, &pev->group);
if (ret != 0) {
pr_warning("Failed to make a group name.\n");
return ret;
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index e54e7b011577..5a27eb4fad05 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -120,7 +120,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev);
/* Events to command string */
char *synthesize_perf_probe_command(struct perf_probe_event *pev);
char *synthesize_probe_trace_command(struct probe_trace_event *tev);
-int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len);
+char *synthesize_perf_probe_arg(struct perf_probe_arg *pa);
/* Check the perf_probe_event needs debuginfo */
bool perf_probe_event_need_dwarf(struct perf_probe_event *pev);
@@ -154,7 +154,8 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
int show_available_funcs(const char *module, struct strfilter *filter, bool user);
bool arch__prefers_symtab(void);
void arch__fix_tev_from_maps(struct perf_probe_event *pev,
- struct probe_trace_event *tev, struct map *map);
+ struct probe_trace_event *tev, struct map *map,
+ struct symbol *sym);
/* If there is no space to write, returns -E2BIG. */
int e_snprintf(char *str, size_t size, const char *format, ...)
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index e3b3b92e4458..3fe6214970e6 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -220,8 +220,7 @@ int probe_file__add_event(int fd, struct probe_trace_event *tev)
pr_debug("Writing event: %s\n", buf);
if (!probe_event_dry_run) {
- ret = write(fd, buf, strlen(buf));
- if (ret <= 0) {
+ if (write(fd, buf, strlen(buf)) < (int)strlen(buf)) {
ret = -errno;
pr_warning("Failed to write event: %s\n",
strerror_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index b3bd0fba0237..1259839dbf6d 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -553,7 +553,7 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
{
Dwarf_Die vr_die;
- char buf[32], *ptr;
+ char *buf, *ptr;
int ret = 0;
/* Copy raw parameters */
@@ -563,13 +563,13 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
if (pf->pvar->name)
pf->tvar->name = strdup(pf->pvar->name);
else {
- ret = synthesize_perf_probe_arg(pf->pvar, buf, 32);
- if (ret < 0)
- return ret;
+ buf = synthesize_perf_probe_arg(pf->pvar);
+ if (!buf)
+ return -ENOMEM;
ptr = strchr(buf, ':'); /* Change type separator to _ */
if (ptr)
*ptr = '_';
- pf->tvar->name = strdup(buf);
+ pf->tvar->name = buf;
}
if (pf->tvar->name == NULL)
return -ENOMEM;
@@ -1294,6 +1294,7 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
{
struct available_var_finder *af = data;
struct variable_list *vl;
+ struct strbuf buf = STRBUF_INIT;
int tag, ret;
vl = &af->vls[af->nvls - 1];
@@ -1307,25 +1308,26 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
if (ret == 0 || ret == -ERANGE) {
int ret2;
bool externs = !af->child;
- struct strbuf buf;
- strbuf_init(&buf, 64);
+ if (strbuf_init(&buf, 64) < 0)
+ goto error;
if (probe_conf.show_location_range) {
- if (!externs) {
- if (ret)
- strbuf_add(&buf, "[INV]\t", 6);
- else
- strbuf_add(&buf, "[VAL]\t", 6);
- } else
- strbuf_add(&buf, "[EXT]\t", 6);
+ if (!externs)
+ ret2 = strbuf_add(&buf,
+ ret ? "[INV]\t" : "[VAL]\t", 6);
+ else
+ ret2 = strbuf_add(&buf, "[EXT]\t", 6);
+ if (ret2)
+ goto error;
}
ret2 = die_get_varname(die_mem, &buf);
if (!ret2 && probe_conf.show_location_range &&
!externs) {
- strbuf_addch(&buf, '\t');
+ if (strbuf_addch(&buf, '\t') < 0)
+ goto error;
ret2 = die_get_var_range(&af->pf.sp_die,
die_mem, &buf);
}
@@ -1343,6 +1345,10 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
return DIE_FIND_CB_CONTINUE;
else
return DIE_FIND_CB_SIBLING;
+error:
+ strbuf_release(&buf);
+ pr_debug("Error in strbuf\n");
+ return DIE_FIND_CB_END;
}
/* Add a found vars into available variables list */
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 8162ba0e2e57..36c6862119e3 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -23,3 +23,4 @@ util/strlist.c
util/trace-event.c
../lib/rbtree.c
util/string.c
+util/symbol_fprintf.c
diff --git a/tools/perf/util/quote.c b/tools/perf/util/quote.c
index 01f03242b86a..c6d4ee2de752 100644
--- a/tools/perf/util/quote.c
+++ b/tools/perf/util/quote.c
@@ -17,38 +17,42 @@ static inline int need_bs_quote(char c)
return (c == '\'' || c == '!');
}
-static void sq_quote_buf(struct strbuf *dst, const char *src)
+static int sq_quote_buf(struct strbuf *dst, const char *src)
{
char *to_free = NULL;
+ int ret;
if (dst->buf == src)
to_free = strbuf_detach(dst, NULL);
- strbuf_addch(dst, '\'');
- while (*src) {
+ ret = strbuf_addch(dst, '\'');
+ while (!ret && *src) {
size_t len = strcspn(src, "'!");
- strbuf_add(dst, src, len);
+ ret = strbuf_add(dst, src, len);
src += len;
- while (need_bs_quote(*src)) {
- strbuf_addstr(dst, "'\\");
- strbuf_addch(dst, *src++);
- strbuf_addch(dst, '\'');
- }
+ while (!ret && need_bs_quote(*src))
+ ret = strbuf_addf(dst, "'\\%c\'", *src++);
}
- strbuf_addch(dst, '\'');
+ if (!ret)
+ ret = strbuf_addch(dst, '\'');
free(to_free);
+
+ return ret;
}
-void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
+int sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
{
- int i;
+ int i, ret;
/* Copy into destination buffer. */
- strbuf_grow(dst, 255);
- for (i = 0; argv[i]; ++i) {
- strbuf_addch(dst, ' ');
- sq_quote_buf(dst, argv[i]);
+ ret = strbuf_grow(dst, 255);
+ for (i = 0; !ret && argv[i]; ++i) {
+ ret = strbuf_addch(dst, ' ');
+ if (ret)
+ break;
+ ret = sq_quote_buf(dst, argv[i]);
if (maxlen && dst->len > maxlen)
die("Too many or long arguments");
}
+ return ret;
}
diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h
index 3340c9c4a6ca..e1ec19146fb0 100644
--- a/tools/perf/util/quote.h
+++ b/tools/perf/util/quote.h
@@ -24,6 +24,6 @@
* sq_quote() in a real application.
*/
-void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
+int sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
#endif /* __PERF_QUOTE_H */
diff --git a/tools/perf/util/rb_resort.h b/tools/perf/util/rb_resort.h
new file mode 100644
index 000000000000..abc76e3d3098
--- /dev/null
+++ b/tools/perf/util/rb_resort.h
@@ -0,0 +1,149 @@
+#ifndef _PERF_RESORT_RB_H_
+#define _PERF_RESORT_RB_H_
+/*
+ * Template for creating a class to resort an existing rb_tree according to
+ * a new sort criterion that must be present in the entries of the source
+ * rb_tree.
+ *
+ * (c) 2016 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Quick example, resorting threads by their shortname:
+ *
+ * First define the prefix (threads) to be used for the functions and data
+ * structures created, and provide an expression for the sorting, then the
+ * fields to be present in each of the entries in the new, sorted, rb_tree.
+ *
+ * The body of the init function should collect the fields, maybe
+ * pre-calculating them from multiple entries in the original 'entry' from
+ * the rb_tree used as a source for the entries to be sorted:
+
+DEFINE_RESORT_RB(threads, strcmp(a->thread->shortname,
+ b->thread->shortname) < 0,
+ struct thread *thread;
+)
+{
+ entry->thread = rb_entry(nd, struct thread, rb_node);
+}
+
+ * After this it is just a matter of instantiating it and iterating it.
+ * For a few data structures with existing rb_trees, such as 'struct machine',
+ * helpers are available to get the rb_root and the nr_entries:
+
+ DECLARE_RESORT_RB_MACHINE_THREADS(threads, machine_ptr);
+
+ * This will instantiate the new rb_tree and a cursor for it, which can be used as:
+
+ struct rb_node *nd;
+
+ resort_rb__for_each(nd, threads) {
+ struct thread *t = threads_entry;
+ printf("%s: %d\n", t->shortname, t->tid);
+ }
+
+ * Then delete it:
+
+ resort_rb__delete(threads);
+
+ * The names of the data structures and functions will have a _sorted suffix
+ * right before the method names, i.e. they will look like:
+ *
+ * struct threads_sorted_entry {}
+ * threads_sorted__insert()
+ */
+
+#define DEFINE_RESORT_RB(__name, __comp, ...) \
+struct __name##_sorted_entry { \
+ struct rb_node rb_node; \
+ __VA_ARGS__ \
+}; \
+static void __name##_sorted__init_entry(struct rb_node *nd, \
+ struct __name##_sorted_entry *entry); \
+ \
+static int __name##_sorted__cmp(struct rb_node *nda, struct rb_node *ndb) \
+{ \
+ struct __name##_sorted_entry *a, *b; \
+ a = rb_entry(nda, struct __name##_sorted_entry, rb_node); \
+ b = rb_entry(ndb, struct __name##_sorted_entry, rb_node); \
+ return __comp; \
+} \
+ \
+struct __name##_sorted { \
+ struct rb_root entries; \
+ struct __name##_sorted_entry nd[0]; \
+}; \
+ \
+static void __name##_sorted__insert(struct __name##_sorted *sorted, \
+ struct rb_node *sorted_nd) \
+{ \
+ struct rb_node **p = &sorted->entries.rb_node, *parent = NULL; \
+ while (*p != NULL) { \
+ parent = *p; \
+ if (__name##_sorted__cmp(sorted_nd, parent)) \
+ p = &(*p)->rb_left; \
+ else \
+ p = &(*p)->rb_right; \
+ } \
+ rb_link_node(sorted_nd, parent, p); \
+ rb_insert_color(sorted_nd, &sorted->entries); \
+} \
+ \
+static void __name##_sorted__sort(struct __name##_sorted *sorted, \
+ struct rb_root *entries) \
+{ \
+ struct rb_node *nd; \
+ unsigned int i = 0; \
+ for (nd = rb_first(entries); nd; nd = rb_next(nd)) { \
+ struct __name##_sorted_entry *snd = &sorted->nd[i++]; \
+ __name##_sorted__init_entry(nd, snd); \
+ __name##_sorted__insert(sorted, &snd->rb_node); \
+ } \
+} \
+ \
+static struct __name##_sorted *__name##_sorted__new(struct rb_root *entries, \
+ int nr_entries) \
+{ \
+ struct __name##_sorted *sorted; \
+ sorted = malloc(sizeof(*sorted) + sizeof(sorted->nd[0]) * nr_entries); \
+ if (sorted) { \
+ sorted->entries = RB_ROOT; \
+ __name##_sorted__sort(sorted, entries); \
+ } \
+ return sorted; \
+} \
+ \
+static void __name##_sorted__delete(struct __name##_sorted *sorted) \
+{ \
+ free(sorted); \
+} \
+ \
+static void __name##_sorted__init_entry(struct rb_node *nd, \
+ struct __name##_sorted_entry *entry)
+
+#define DECLARE_RESORT_RB(__name) \
+struct __name##_sorted_entry *__name##_entry; \
+struct __name##_sorted *__name = __name##_sorted__new
+
+#define resort_rb__for_each(__nd, __name) \
+ for (__nd = rb_first(&__name->entries); \
+ __name##_entry = rb_entry(__nd, struct __name##_sorted_entry, \
+ rb_node), __nd; \
+ __nd = rb_next(__nd))
+
+#define resort_rb__delete(__name) \
+ __name##_sorted__delete(__name), __name = NULL
+
+/*
+ * Helpers for other classes that contain both an rbtree and the
+ * number of entries in it:
+ */
+
+/* For 'struct intlist' */
+#define DECLARE_RESORT_RB_INTLIST(__name, __ilist) \
+ DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries, \
+ __ilist->rblist.nr_entries)
+
+/* For 'struct machine->threads' */
+#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine) \
+ DECLARE_RESORT_RB(__name)(&__machine->threads, __machine->nr_threads)
+
+#endif /* _PERF_RESORT_RB_H_ */
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 0467367dc315..481792c7484b 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -129,7 +129,8 @@ bool perf_can_record_cpu_wide(void)
return true;
}
-void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
+void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
+ struct callchain_param *callchain)
{
struct perf_evsel *evsel;
bool use_sample_identifier = false;
@@ -148,7 +149,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
use_comm_exec = perf_can_comm_exec();
evlist__for_each(evlist, evsel) {
- perf_evsel__config(evsel, opts);
+ perf_evsel__config(evsel, opts, callchain);
if (evsel->tracking && use_comm_exec)
evsel->attr.comm_exec = 1;
}
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index b3aabc0d4eb0..62c7f6988e0e 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -31,6 +31,8 @@
#include <perl.h>
#include "../../perf.h"
+#include "../callchain.h"
+#include "../machine.h"
#include "../thread.h"
#include "../event.h"
#include "../trace-event.h"
@@ -248,10 +250,90 @@ static void define_event_symbols(struct event_format *event,
define_event_symbols(event, ev_name, args->next);
}
+static SV *perl_process_callchain(struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct addr_location *al)
+{
+ AV *list;
+
+ list = newAV();
+ if (!list)
+ goto exit;
+
+ if (!symbol_conf.use_callchain || !sample->callchain)
+ goto exit;
+
+ if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
+ sample, NULL, NULL,
+ sysctl_perf_event_max_stack) != 0) {
+ pr_err("Failed to resolve callchain. Skipping\n");
+ goto exit;
+ }
+ callchain_cursor_commit(&callchain_cursor);
+
+
+ while (1) {
+ HV *elem;
+ struct callchain_cursor_node *node;
+ node = callchain_cursor_current(&callchain_cursor);
+ if (!node)
+ break;
+
+ elem = newHV();
+ if (!elem)
+ goto exit;
+
+ if (!hv_stores(elem, "ip", newSVuv(node->ip))) {
+ hv_undef(elem);
+ goto exit;
+ }
+
+ if (node->sym) {
+ HV *sym = newHV();
+ if (!sym) {
+ hv_undef(elem);
+ goto exit;
+ }
+ if (!hv_stores(sym, "start", newSVuv(node->sym->start)) ||
+ !hv_stores(sym, "end", newSVuv(node->sym->end)) ||
+ !hv_stores(sym, "binding", newSVuv(node->sym->binding)) ||
+ !hv_stores(sym, "name", newSVpvn(node->sym->name,
+ node->sym->namelen)) ||
+ !hv_stores(elem, "sym", newRV_noinc((SV*)sym))) {
+ hv_undef(sym);
+ hv_undef(elem);
+ goto exit;
+ }
+ }
+
+ if (node->map) {
+ struct map *map = node->map;
+ const char *dsoname = "[unknown]";
+ if (map && map->dso && (map->dso->name || map->dso->long_name)) {
+ if (symbol_conf.show_kernel_path && map->dso->long_name)
+ dsoname = map->dso->long_name;
+ else if (map->dso->name)
+ dsoname = map->dso->name;
+ }
+ if (!hv_stores(elem, "dso", newSVpv(dsoname,0))) {
+ hv_undef(elem);
+ goto exit;
+ }
+ }
+
+ callchain_cursor_advance(&callchain_cursor);
+ av_push(list, newRV_noinc((SV*)elem));
+ }
+
+exit:
+ return newRV_noinc((SV*)list);
+}
+
static void perl_process_tracepoint(struct perf_sample *sample,
struct perf_evsel *evsel,
- struct thread *thread)
+ struct addr_location *al)
{
+ struct thread *thread = al->thread;
struct event_format *event = evsel->tp_format;
struct format_field *field;
static char handler[256];
@@ -295,6 +377,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
XPUSHs(sv_2mortal(newSVuv(ns)));
XPUSHs(sv_2mortal(newSViv(pid)));
XPUSHs(sv_2mortal(newSVpv(comm, 0)));
+ XPUSHs(sv_2mortal(perl_process_callchain(sample, evsel, al)));
/* common fields other than pid can be accessed via xsub fns */
@@ -329,6 +412,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
XPUSHs(sv_2mortal(newSVuv(nsecs)));
XPUSHs(sv_2mortal(newSViv(pid)));
XPUSHs(sv_2mortal(newSVpv(comm, 0)));
+ XPUSHs(sv_2mortal(perl_process_callchain(sample, evsel, al)));
call_pv("main::trace_unhandled", G_SCALAR);
}
SPAGAIN;
@@ -366,7 +450,7 @@ static void perl_process_event(union perf_event *event,
struct perf_evsel *evsel,
struct addr_location *al)
{
- perl_process_tracepoint(sample, evsel, al->thread);
+ perl_process_tracepoint(sample, evsel, al);
perl_process_event_generic(event, sample, evsel);
}
@@ -490,7 +574,27 @@ static int perl_generate_script(struct pevent *pevent, const char *outfile)
fprintf(ofp, "use Perf::Trace::Util;\n\n");
fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n");
- fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n");
+ fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n");
+
+
+ fprintf(ofp, "\n\
+sub print_backtrace\n\
+{\n\
+ my $callchain = shift;\n\
+ for my $node (@$callchain)\n\
+ {\n\
+ if(exists $node->{sym})\n\
+ {\n\
+ printf( \"\\t[\\%%x] \\%%s\\n\", $node->{ip}, $node->{sym}{name});\n\
+ }\n\
+ else\n\
+ {\n\
+ printf( \"\\t[\\%%x]\\n\", $node->{ip});\n\
+ }\n\
+ }\n\
+}\n\n\
+");
+
while ((event = trace_find_next_event(pevent, event))) {
fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name);
@@ -502,7 +606,8 @@ static int perl_generate_script(struct pevent *pevent, const char *outfile)
fprintf(ofp, "$common_secs, ");
fprintf(ofp, "$common_nsecs,\n");
fprintf(ofp, "\t $common_pid, ");
- fprintf(ofp, "$common_comm,\n\t ");
+ fprintf(ofp, "$common_comm, ");
+ fprintf(ofp, "$common_callchain,\n\t ");
not_first = 0;
count = 0;
@@ -519,7 +624,7 @@ static int perl_generate_script(struct pevent *pevent, const char *outfile)
fprintf(ofp, "\tprint_header($event_name, $common_cpu, "
"$common_secs, $common_nsecs,\n\t "
- "$common_pid, $common_comm);\n\n");
+ "$common_pid, $common_comm, $common_callchain);\n\n");
fprintf(ofp, "\tprintf(\"");
@@ -581,17 +686,22 @@ static int perl_generate_script(struct pevent *pevent, const char *outfile)
fprintf(ofp, "$%s", f->name);
}
- fprintf(ofp, ");\n");
+ fprintf(ofp, ");\n\n");
+
+ fprintf(ofp, "\tprint_backtrace($common_callchain);\n");
+
fprintf(ofp, "}\n\n");
}
fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, "
"$common_cpu, $common_secs, $common_nsecs,\n\t "
- "$common_pid, $common_comm) = @_;\n\n");
+ "$common_pid, $common_comm, $common_callchain) = @_;\n\n");
fprintf(ofp, "\tprint_header($event_name, $common_cpu, "
"$common_secs, $common_nsecs,\n\t $common_pid, "
- "$common_comm);\n}\n\n");
+ "$common_comm, $common_callchain);\n");
+ fprintf(ofp, "\tprint_backtrace($common_callchain);\n");
+ fprintf(ofp, "}\n\n");
fprintf(ofp, "sub print_header\n{\n"
"\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n"
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index fbd05242b4e5..ff134700bf30 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -41,6 +41,7 @@
#include "../thread-stack.h"
#include "../trace-event.h"
#include "../machine.h"
+#include "../call-path.h"
#include "thread_map.h"
#include "cpumap.h"
#include "stat.h"
@@ -323,7 +324,7 @@ static PyObject *python_process_callchain(struct perf_sample *sample,
if (!symbol_conf.use_callchain || !sample->callchain)
goto exit;
- if (thread__resolve_callchain(al->thread, evsel,
+ if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
sample, NULL, NULL,
scripting_max_stack) != 0) {
pr_err("Failed to resolve callchain. Skipping\n");
@@ -407,8 +408,11 @@ static void python_process_tracepoint(struct perf_sample *sample,
if (!t)
Py_FatalError("couldn't create Python tuple");
- if (!event)
- die("ug! no event found for type %d", (int)evsel->attr.config);
+ if (!event) {
+ snprintf(handler_name, sizeof(handler_name),
+ "ug! no event found for type %" PRIu64, (u64)evsel->attr.config);
+ Py_FatalError(handler_name);
+ }
pid = raw_field_value(event, "common_pid", data);
@@ -614,7 +618,7 @@ static int python_export_dso(struct db_export *dbe, struct dso *dso,
struct machine *machine)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
- char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+ char sbuild_id[SBUILD_ID_SIZE];
PyObject *t;
build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
@@ -681,7 +685,7 @@ static int python_export_sample(struct db_export *dbe,
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
- t = tuple_new(21);
+ t = tuple_new(22);
tuple_set_u64(t, 0, es->db_id);
tuple_set_u64(t, 1, es->evsel->db_id);
@@ -704,6 +708,7 @@ static int python_export_sample(struct db_export *dbe,
tuple_set_u64(t, 18, es->sample->data_src);
tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK);
tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX));
+ tuple_set_u64(t, 21, es->call_path_id);
call_object(tables->sample_handler, t, "sample_table");
@@ -998,8 +1003,10 @@ static void set_table_handlers(struct tables *tables)
{
const char *perf_db_export_mode = "perf_db_export_mode";
const char *perf_db_export_calls = "perf_db_export_calls";
- PyObject *db_export_mode, *db_export_calls;
+ const char *perf_db_export_callchains = "perf_db_export_callchains";
+ PyObject *db_export_mode, *db_export_calls, *db_export_callchains;
bool export_calls = false;
+ bool export_callchains = false;
int ret;
memset(tables, 0, sizeof(struct tables));
@@ -1016,6 +1023,7 @@ static void set_table_handlers(struct tables *tables)
if (!ret)
return;
+ /* handle export calls */
tables->dbe.crp = NULL;
db_export_calls = PyDict_GetItemString(main_dict, perf_db_export_calls);
if (db_export_calls) {
@@ -1033,6 +1041,33 @@ static void set_table_handlers(struct tables *tables)
Py_FatalError("failed to create calls processor");
}
+ /* handle export callchains */
+ tables->dbe.cpr = NULL;
+ db_export_callchains = PyDict_GetItemString(main_dict,
+ perf_db_export_callchains);
+ if (db_export_callchains) {
+ ret = PyObject_IsTrue(db_export_callchains);
+ if (ret == -1)
+ handler_call_die(perf_db_export_callchains);
+ export_callchains = !!ret;
+ }
+
+ if (export_callchains) {
+ /*
+ * Attempt to use the call path root from the call return
+ * processor, if the call return processor is in use. Otherwise,
+ * we allocate a new call path root. This prevents exporting
+ * duplicate call path ids when both are in use simultaneously.
+ */
+ if (tables->dbe.crp)
+ tables->dbe.cpr = tables->dbe.crp->cpr;
+ else
+ tables->dbe.cpr = call_path_root__new();
+
+ if (!tables->dbe.cpr)
+ Py_FatalError("failed to create call path root");
+ }
+
tables->db_export_mode = true;
/*
* Reserve per symbol space for symbol->db_id via symbol__priv()
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 4abd85c6346d..2335b2824d8a 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -409,6 +409,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->stat = process_stat_stub;
if (tool->stat_round == NULL)
tool->stat_round = process_stat_round_stub;
+ if (tool->time_conv == NULL)
+ tool->time_conv = process_event_op2_stub;
}
static void swap_sample_id_all(union perf_event *event, void *data)
@@ -794,6 +796,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_STAT] = perf_event__stat_swap,
[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
+ [PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
[PERF_RECORD_HEADER_MAX] = NULL,
};
@@ -904,7 +907,7 @@ static void callchain__printf(struct perf_evsel *evsel,
unsigned int i;
struct ip_callchain *callchain = sample->callchain;
- if (has_branch_callstack(evsel))
+ if (perf_evsel__has_branch_callstack(evsel))
callchain__lbr_callstack_printf(sample);
printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
@@ -1078,7 +1081,7 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
if (sample_type & PERF_SAMPLE_CALLCHAIN)
callchain__printf(evsel, sample);
- if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !has_branch_callstack(evsel))
+ if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
branch_stack__printf(sample);
if (sample_type & PERF_SAMPLE_REGS_USER)
@@ -1341,6 +1344,9 @@ static s64 perf_session__process_user_event(struct perf_session *session,
return tool->stat(tool, event, session);
case PERF_RECORD_STAT_ROUND:
return tool->stat_round(tool, event, session);
+ case PERF_RECORD_TIME_CONV:
+ session->time_conv = event->time_conv;
+ return tool->time_conv(tool, event, session);
default:
return -EINVAL;
}
@@ -1830,7 +1836,11 @@ out:
out_err:
ui_progress__finish();
perf_session__warn_about_errors(session);
- ordered_events__free(&session->ordered_events);
+ /*
+ * We may be switching perf.data output; make ordered_events
+ * reusable.
+ */
+ ordered_events__reinit(&session->ordered_events);
auxtrace__free_events(session);
session->one_mmap = false;
return err;
@@ -1947,105 +1957,6 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
return NULL;
}
-void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
- struct addr_location *al,
- unsigned int print_opts, unsigned int stack_depth)
-{
- struct callchain_cursor_node *node;
- int print_ip = print_opts & PRINT_IP_OPT_IP;
- int print_sym = print_opts & PRINT_IP_OPT_SYM;
- int print_dso = print_opts & PRINT_IP_OPT_DSO;
- int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
- int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
- int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
- char s = print_oneline ? ' ' : '\t';
-
- if (symbol_conf.use_callchain && sample->callchain) {
- struct addr_location node_al;
-
- if (thread__resolve_callchain(al->thread, evsel,
- sample, NULL, NULL,
- stack_depth) != 0) {
- if (verbose)
- error("Failed to resolve callchain. Skipping\n");
- return;
- }
- callchain_cursor_commit(&callchain_cursor);
-
- if (print_symoffset)
- node_al = *al;
-
- while (stack_depth) {
- u64 addr = 0;
-
- node = callchain_cursor_current(&callchain_cursor);
- if (!node)
- break;
-
- if (node->sym && node->sym->ignore)
- goto next;
-
- if (print_ip)
- printf("%c%16" PRIx64, s, node->ip);
-
- if (node->map)
- addr = node->map->map_ip(node->map, node->ip);
-
- if (print_sym) {
- printf(" ");
- if (print_symoffset) {
- node_al.addr = addr;
- node_al.map = node->map;
- symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
- } else
- symbol__fprintf_symname(node->sym, stdout);
- }
-
- if (print_dso) {
- printf(" (");
- map__fprintf_dsoname(node->map, stdout);
- printf(")");
- }
-
- if (print_srcline)
- map__fprintf_srcline(node->map, addr, "\n ",
- stdout);
-
- if (!print_oneline)
- printf("\n");
-
- stack_depth--;
-next:
- callchain_cursor_advance(&callchain_cursor);
- }
-
- } else {
- if (al->sym && al->sym->ignore)
- return;
-
- if (print_ip)
- printf("%16" PRIx64, sample->ip);
-
- if (print_sym) {
- printf(" ");
- if (print_symoffset)
- symbol__fprintf_symname_offs(al->sym, al,
- stdout);
- else
- symbol__fprintf_symname(al->sym, stdout);
- }
-
- if (print_dso) {
- printf(" (");
- map__fprintf_dsoname(al->map, stdout);
- printf(")");
- }
-
- if (print_srcline)
- map__fprintf_srcline(al->map, al->addr, "\n ", stdout);
- }
-}
-
int perf_session__cpu_bitmap(struct perf_session *session,
const char *cpu_list, unsigned long *cpu_bitmap)
{
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 5f792e35d4c1..4bd758553450 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -26,6 +26,7 @@ struct perf_session {
struct itrace_synth_opts *itrace_synth_opts;
struct list_head auxtrace_index;
struct trace_event tevent;
+ struct time_conv_event time_conv;
bool repipe;
bool one_mmap;
void *one_mmap_addr;
@@ -35,13 +36,6 @@ struct perf_session {
struct perf_tool *tool;
};
-#define PRINT_IP_OPT_IP (1<<0)
-#define PRINT_IP_OPT_SYM (1<<1)
-#define PRINT_IP_OPT_DSO (1<<2)
-#define PRINT_IP_OPT_SYMOFFSET (1<<3)
-#define PRINT_IP_OPT_ONELINE (1<<4)
-#define PRINT_IP_OPT_SRCLINE (1<<5)
-
struct perf_tool;
struct perf_session *perf_session__new(struct perf_data_file *file,
@@ -103,10 +97,6 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type);
-void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
- struct addr_location *al,
- unsigned int print_opts, unsigned int stack_depth);
-
int perf_session__cpu_bitmap(struct perf_session *session,
const char *cpu_list, unsigned long *cpu_bitmap);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index f5ba111cd9fb..20e69edd5006 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -21,13 +21,6 @@ const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
-int sort__need_collapse = 0;
-int sort__has_parent = 0;
-int sort__has_sym = 0;
-int sort__has_dso = 0;
-int sort__has_socket = 0;
-int sort__has_thread = 0;
-int sort__has_comm = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
/*
@@ -244,7 +237,7 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
* comparing symbol address alone is not enough since it's a
* relative address within a dso.
*/
- if (!sort__has_dso) {
+ if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
ret = sort__dso_cmp(left, right);
if (ret != 0)
return ret;
@@ -2163,7 +2156,7 @@ static int __sort_dimension__add(struct sort_dimension *sd,
return -1;
if (sd->entry->se_collapse)
- sort__need_collapse = 1;
+ list->need_collapse = 1;
sd->taken = 1;
@@ -2245,9 +2238,9 @@ static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
pr_err("Invalid regex: %s\n%s", parent_pattern, err);
return -EINVAL;
}
- sort__has_parent = 1;
+ list->parent = 1;
} else if (sd->entry == &sort_sym) {
- sort__has_sym = 1;
+ list->sym = 1;
/*
* perf diff displays the performance difference amongst
* two or more perf.data files. Those files could come
@@ -2258,13 +2251,13 @@ static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
sd->entry->se_collapse = sort__sym_sort;
} else if (sd->entry == &sort_dso) {
- sort__has_dso = 1;
+ list->dso = 1;
} else if (sd->entry == &sort_socket) {
- sort__has_socket = 1;
+ list->socket = 1;
} else if (sd->entry == &sort_thread) {
- sort__has_thread = 1;
+ list->thread = 1;
} else if (sd->entry == &sort_comm) {
- sort__has_comm = 1;
+ list->comm = 1;
}
return __sort_dimension__add(sd, list, level);
@@ -2289,7 +2282,7 @@ static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
return -EINVAL;
if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
- sort__has_sym = 1;
+ list->sym = 1;
__sort_dimension__add(sd, list, level);
return 0;
@@ -2305,7 +2298,7 @@ static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
return -EINVAL;
if (sd->entry == &sort_mem_daddr_sym)
- sort__has_sym = 1;
+ list->sym = 1;
__sort_dimension__add(sd, list, level);
return 0;
@@ -2749,10 +2742,10 @@ int setup_sorting(struct perf_evlist *evlist)
void reset_output_field(void)
{
- sort__need_collapse = 0;
- sort__has_parent = 0;
- sort__has_sym = 0;
- sort__has_dso = 0;
+ perf_hpp_list.need_collapse = 0;
+ perf_hpp_list.parent = 0;
+ perf_hpp_list.sym = 0;
+ perf_hpp_list.dso = 0;
field_order = NULL;
sort_order = NULL;
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 3f4e35998119..42927f448bcb 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -31,13 +31,6 @@ extern const char *parent_pattern;
extern const char default_sort_order[];
extern regex_t ignore_callees_regex;
extern int have_ignore_callees;
-extern int sort__need_collapse;
-extern int sort__has_dso;
-extern int sort__has_parent;
-extern int sort__has_sym;
-extern int sort__has_socket;
-extern int sort__has_thread;
-extern int sort__has_comm;
extern enum sort_mode sort__mode;
extern struct sort_entry sort_comm;
extern struct sort_entry sort_dso;
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 4d9b481cf3b6..ffa1d0653861 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -307,6 +307,7 @@ int perf_stat_process_counter(struct perf_stat_config *config,
struct perf_counts_values *aggr = &counter->counts->aggr;
struct perf_stat_evsel *ps = counter->priv;
u64 *count = counter->counts->aggr.values;
+ u64 val;
int i, ret;
aggr->val = aggr->ena = aggr->run = 0;
@@ -346,7 +347,8 @@ int perf_stat_process_counter(struct perf_stat_config *config,
/*
* Save the full runtime - to allow normalization during printout:
*/
- perf_stat__update_shadow_stats(counter, count, 0);
+ val = counter->scale * *count;
+ perf_stat__update_shadow_stats(counter, &val, 0);
return 0;
}
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index 8fb73295ec34..f95f682aa2b2 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -1,3 +1,4 @@
+#include "debug.h"
#include "cache.h"
#include <linux/kernel.h>
@@ -17,12 +18,13 @@ int prefixcmp(const char *str, const char *prefix)
*/
char strbuf_slopbuf[1];
-void strbuf_init(struct strbuf *sb, ssize_t hint)
+int strbuf_init(struct strbuf *sb, ssize_t hint)
{
sb->alloc = sb->len = 0;
sb->buf = strbuf_slopbuf;
if (hint)
- strbuf_grow(sb, hint);
+ return strbuf_grow(sb, hint);
+ return 0;
}
void strbuf_release(struct strbuf *sb)
@@ -42,67 +44,104 @@ char *strbuf_detach(struct strbuf *sb, size_t *sz)
return res;
}
-void strbuf_grow(struct strbuf *sb, size_t extra)
+int strbuf_grow(struct strbuf *sb, size_t extra)
{
- if (sb->len + extra + 1 <= sb->len)
- die("you want to use way too much memory");
- if (!sb->alloc)
- sb->buf = NULL;
- ALLOC_GROW(sb->buf, sb->len + extra + 1, sb->alloc);
+ char *buf;
+ size_t nr = sb->len + extra + 1;
+
+ if (nr < sb->alloc)
+ return 0;
+
+ if (nr <= sb->len)
+ return -E2BIG;
+
+ if (alloc_nr(sb->alloc) > nr)
+ nr = alloc_nr(sb->alloc);
+
+ /*
+ * Note that sb->buf == strbuf_slopbuf if sb->alloc == 0, and it is
+ * a static variable. Thus we have to avoid passing it to realloc.
+ */
+ buf = realloc(sb->alloc ? sb->buf : NULL, nr * sizeof(*buf));
+ if (!buf)
+ return -ENOMEM;
+
+ sb->buf = buf;
+ sb->alloc = nr;
+ return 0;
}
-void strbuf_addch(struct strbuf *sb, int c)
+int strbuf_addch(struct strbuf *sb, int c)
{
- strbuf_grow(sb, 1);
+ int ret = strbuf_grow(sb, 1);
+ if (ret)
+ return ret;
+
sb->buf[sb->len++] = c;
sb->buf[sb->len] = '\0';
+ return 0;
}
-void strbuf_add(struct strbuf *sb, const void *data, size_t len)
+int strbuf_add(struct strbuf *sb, const void *data, size_t len)
{
- strbuf_grow(sb, len);
+ int ret = strbuf_grow(sb, len);
+ if (ret)
+ return ret;
+
memcpy(sb->buf + sb->len, data, len);
- strbuf_setlen(sb, sb->len + len);
+ return strbuf_setlen(sb, sb->len + len);
}
-static void strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
+static int strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
{
- int len;
+ int len, ret;
va_list ap_saved;
- if (!strbuf_avail(sb))
- strbuf_grow(sb, 64);
+ if (!strbuf_avail(sb)) {
+ ret = strbuf_grow(sb, 64);
+ if (ret)
+ return ret;
+ }
va_copy(ap_saved, ap);
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
if (len < 0)
- die("your vsnprintf is broken");
+ return len;
if (len > strbuf_avail(sb)) {
- strbuf_grow(sb, len);
+ ret = strbuf_grow(sb, len);
+ if (ret)
+ return ret;
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
va_end(ap_saved);
if (len > strbuf_avail(sb)) {
- die("this should not happen, your vsnprintf is broken");
+ pr_debug("this should not happen, your vsnprintf is broken");
+ return -EINVAL;
}
}
- strbuf_setlen(sb, sb->len + len);
+ return strbuf_setlen(sb, sb->len + len);
}
-void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
+int strbuf_addf(struct strbuf *sb, const char *fmt, ...)
{
va_list ap;
+ int ret;
va_start(ap, fmt);
- strbuf_addv(sb, fmt, ap);
+ ret = strbuf_addv(sb, fmt, ap);
va_end(ap);
+ return ret;
}
ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint)
{
size_t oldlen = sb->len;
size_t oldalloc = sb->alloc;
+ int ret;
+
+ ret = strbuf_grow(sb, hint ? hint : 8192);
+ if (ret)
+ return ret;
- strbuf_grow(sb, hint ? hint : 8192);
for (;;) {
ssize_t cnt;
@@ -112,12 +151,14 @@ ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint)
strbuf_release(sb);
else
strbuf_setlen(sb, oldlen);
- return -1;
+ return cnt;
}
if (!cnt)
break;
sb->len += cnt;
- strbuf_grow(sb, 8192);
+ ret = strbuf_grow(sb, 8192);
+ if (ret)
+ return ret;
}
sb->buf[sb->len] = '\0';
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
index ab9be0fbbd40..54b409297d4a 100644
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -51,7 +51,7 @@ struct strbuf {
#define STRBUF_INIT { 0, 0, strbuf_slopbuf }
/*----- strbuf life cycle -----*/
-void strbuf_init(struct strbuf *buf, ssize_t hint);
+int strbuf_init(struct strbuf *buf, ssize_t hint);
void strbuf_release(struct strbuf *buf);
char *strbuf_detach(struct strbuf *buf, size_t *);
@@ -60,26 +60,31 @@ static inline ssize_t strbuf_avail(const struct strbuf *sb) {
return sb->alloc ? sb->alloc - sb->len - 1 : 0;
}
-void strbuf_grow(struct strbuf *buf, size_t);
+int strbuf_grow(struct strbuf *buf, size_t);
-static inline void strbuf_setlen(struct strbuf *sb, size_t len) {
- if (!sb->alloc)
- strbuf_grow(sb, 0);
+static inline int strbuf_setlen(struct strbuf *sb, size_t len) {
+ int ret;
+ if (!sb->alloc) {
+ ret = strbuf_grow(sb, 0);
+ if (ret)
+ return ret;
+ }
assert(len < sb->alloc);
sb->len = len;
sb->buf[len] = '\0';
+ return 0;
}
/*----- add data in your buffer -----*/
-void strbuf_addch(struct strbuf *sb, int c);
+int strbuf_addch(struct strbuf *sb, int c);
-void strbuf_add(struct strbuf *buf, const void *, size_t);
-static inline void strbuf_addstr(struct strbuf *sb, const char *s) {
- strbuf_add(sb, s, strlen(s));
+int strbuf_add(struct strbuf *buf, const void *, size_t);
+static inline int strbuf_addstr(struct strbuf *sb, const char *s) {
+ return strbuf_add(sb, s, strlen(s));
}
__attribute__((format(printf,2,3)))
-void strbuf_addf(struct strbuf *sb, const char *fmt, ...);
+int strbuf_addf(struct strbuf *sb, const char *fmt, ...);
/* XXX: if read fails, any partial read is undone */
ssize_t strbuf_read(struct strbuf *, int fd, ssize_t hint);
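
With the conversion above, strbuf_init(), strbuf_grow(), strbuf_addch(), strbuf_add(), strbuf_addstr() and strbuf_addf() report allocation failures instead of calling die(), so callers are expected to check and propagate the result. A minimal sketch of such a caller (hypothetical, not part of this patch):

#include "strbuf.h"

static int build_greeting(struct strbuf *sb, const char *name)
{
	int ret;

	ret = strbuf_init(sb, 64);	/* pre-sizing; may now fail with -ENOMEM */
	if (ret)
		return ret;

	ret = strbuf_addf(sb, "hello, %s", name);
	if (ret) {
		strbuf_release(sb);	/* drop any partial contents on error */
		return ret;
	}

	return strbuf_addch(sb, '\n');
}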
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index bc229a74c6a9..87a297dd8901 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -709,17 +709,10 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
if (ss->opdshdr.sh_type != SHT_PROGBITS)
ss->opdsec = NULL;
- if (dso->kernel == DSO_TYPE_USER) {
- GElf_Shdr shdr;
- ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
- ehdr.e_type == ET_REL ||
- dso__is_vdso(dso) ||
- elf_section_by_name(elf, &ehdr, &shdr,
- ".gnu.prelink_undo",
- NULL) != NULL);
- } else {
+ if (dso->kernel == DSO_TYPE_USER)
+ ss->adjust_symbols = true;
+ else
ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
- }
ss->name = strdup(name);
if (!ss->name) {
@@ -777,7 +770,8 @@ static bool want_demangle(bool is_kernel_sym)
return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}
-void __weak arch__elf_sym_adjust(GElf_Sym *sym __maybe_unused) { }
+void __weak arch__sym_update(struct symbol *s __maybe_unused,
+ GElf_Sym *sym __maybe_unused) { }
int dso__load_sym(struct dso *dso, struct map *map,
struct symsrc *syms_ss, struct symsrc *runtime_ss,
@@ -954,8 +948,6 @@ int dso__load_sym(struct dso *dso, struct map *map,
(sym.st_value & 1))
--sym.st_value;
- arch__elf_sym_adjust(&sym);
-
if (dso->kernel || kmodule) {
char dso_name[PATH_MAX];
@@ -1089,6 +1081,8 @@ new_symbol:
if (!f)
goto out_elf_end;
+ arch__sym_update(f, &sym);
+
if (filter && filter(curr_map, f))
symbol__delete(f);
else {
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index e7588dc91518..7fb33304fb4e 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -255,40 +255,6 @@ void symbol__delete(struct symbol *sym)
free(((void *)sym) - symbol_conf.priv_size);
}
-size_t symbol__fprintf(struct symbol *sym, FILE *fp)
-{
- return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
- sym->start, sym->end,
- sym->binding == STB_GLOBAL ? 'g' :
- sym->binding == STB_LOCAL ? 'l' : 'w',
- sym->name);
-}
-
-size_t symbol__fprintf_symname_offs(const struct symbol *sym,
- const struct addr_location *al, FILE *fp)
-{
- unsigned long offset;
- size_t length;
-
- if (sym && sym->name) {
- length = fprintf(fp, "%s", sym->name);
- if (al) {
- if (al->addr < sym->end)
- offset = al->addr - sym->start;
- else
- offset = al->addr - al->map->start - sym->start;
- length += fprintf(fp, "+0x%lx", offset);
- }
- return length;
- } else
- return fprintf(fp, "[unknown]");
-}
-
-size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
-{
- return symbol__fprintf_symname_offs(sym, NULL, fp);
-}
-
void symbols__delete(struct rb_root *symbols)
{
struct symbol *pos;
@@ -335,7 +301,7 @@ static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
if (ip < s->start)
n = n->rb_left;
- else if (ip >= s->end)
+ else if (ip > s->end || (ip == s->end && ip != s->start))
n = n->rb_right;
else
return s;
@@ -364,11 +330,6 @@ static struct symbol *symbols__next(struct symbol *sym)
return NULL;
}
-struct symbol_name_rb_node {
- struct rb_node rb_node;
- struct symbol sym;
-};
-
static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
{
struct rb_node **p = &symbols->rb_node;
@@ -452,6 +413,18 @@ void dso__reset_find_symbol_cache(struct dso *dso)
}
}
+void dso__insert_symbol(struct dso *dso, enum map_type type, struct symbol *sym)
+{
+ symbols__insert(&dso->symbols[type], sym);
+
+ /* update the symbol cache if necessary */
+ if (dso->last_find_result[type].addr >= sym->start &&
+ (dso->last_find_result[type].addr < sym->end ||
+ sym->start == sym->end)) {
+ dso->last_find_result[type].symbol = sym;
+ }
+}
+
struct symbol *dso__find_symbol(struct dso *dso,
enum map_type type, u64 addr)
{
@@ -497,21 +470,6 @@ void dso__sort_by_name(struct dso *dso, enum map_type type)
&dso->symbols[type]);
}
-size_t dso__fprintf_symbols_by_name(struct dso *dso,
- enum map_type type, FILE *fp)
-{
- size_t ret = 0;
- struct rb_node *nd;
- struct symbol_name_rb_node *pos;
-
- for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
- pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
- fprintf(fp, "%s\n", pos->sym.name);
- }
-
- return ret;
-}
-
int modules__parse(const char *filename, void *arg,
int (*process_module)(void *arg, const char *name,
u64 start))
@@ -1262,8 +1220,8 @@ static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
return 0;
}
-int dso__load_kallsyms(struct dso *dso, const char *filename,
- struct map *map, symbol_filter_t filter)
+int __dso__load_kallsyms(struct dso *dso, const char *filename,
+ struct map *map, bool no_kcore, symbol_filter_t filter)
{
u64 delta = 0;
@@ -1284,12 +1242,18 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
else
dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
- if (!dso__load_kcore(dso, map, filename))
+ if (!no_kcore && !dso__load_kcore(dso, map, filename))
return dso__split_kallsyms_for_kcore(dso, map, filter);
else
return dso__split_kallsyms(dso, map, delta, filter);
}
+int dso__load_kallsyms(struct dso *dso, const char *filename,
+ struct map *map, symbol_filter_t filter)
+{
+ return __dso__load_kallsyms(dso, filename, map, false, filter);
+}
+
static int dso__load_perf_map(struct dso *dso, struct map *map,
symbol_filter_t filter)
{
@@ -1644,25 +1608,27 @@ out:
return err;
}
+static bool visible_dir_filter(const char *name, struct dirent *d)
+{
+ if (d->d_type != DT_DIR)
+ return false;
+ return lsdir_no_dot_filter(name, d);
+}
+
static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
char kallsyms_filename[PATH_MAX];
- struct dirent *dent;
int ret = -1;
- DIR *d;
+ struct strlist *dirs;
+ struct str_node *nd;
- d = opendir(dir);
- if (!d)
+ dirs = lsdir(dir, visible_dir_filter);
+ if (!dirs)
return -1;
- while (1) {
- dent = readdir(d);
- if (!dent)
- break;
- if (dent->d_type != DT_DIR)
- continue;
+ strlist__for_each(nd, dirs) {
scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
- "%s/%s/kallsyms", dir, dent->d_name);
+ "%s/%s/kallsyms", dir, nd->s);
if (!validate_kcore_addresses(kallsyms_filename, map)) {
strlcpy(dir, kallsyms_filename, dir_sz);
ret = 0;
@@ -1670,7 +1636,7 @@ static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
}
}
- closedir(d);
+ strlist__delete(dirs);
return ret;
}
@@ -1678,7 +1644,7 @@ static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
u8 host_build_id[BUILD_ID_SIZE];
- char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+ char sbuild_id[SBUILD_ID_SIZE];
bool is_host = false;
char path[PATH_MAX];
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index c8b7544d9267..2b5e4ed76fcb 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -55,6 +55,7 @@ struct symbol {
u16 namelen;
u8 binding;
bool ignore;
+ u8 arch_sym;
char name[0];
};
@@ -140,6 +141,11 @@ struct symbol_conf {
extern struct symbol_conf symbol_conf;
+struct symbol_name_rb_node {
+ struct rb_node rb_node;
+ struct symbol sym;
+};
+
static inline int __symbol__join_symfs(char *bf, size_t size, const char *path)
{
return path__join(bf, size, symbol_conf.symfs, path);
@@ -235,9 +241,14 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
symbol_filter_t filter);
int dso__load_vmlinux_path(struct dso *dso, struct map *map,
symbol_filter_t filter);
+int __dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
+ bool no_kcore, symbol_filter_t filter);
int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
symbol_filter_t filter);
+void dso__insert_symbol(struct dso *dso, enum map_type type,
+ struct symbol *sym);
+
struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
u64 addr);
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
@@ -262,8 +273,14 @@ int symbol__init(struct perf_env *env);
void symbol__exit(void);
void symbol__elf_init(void);
struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name);
+size_t __symbol__fprintf_symname_offs(const struct symbol *sym,
+ const struct addr_location *al,
+ bool unknown_as_addr, FILE *fp);
size_t symbol__fprintf_symname_offs(const struct symbol *sym,
const struct addr_location *al, FILE *fp);
+size_t __symbol__fprintf_symname(const struct symbol *sym,
+ const struct addr_location *al,
+ bool unknown_as_addr, FILE *fp);
size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp);
size_t symbol__fprintf(struct symbol *sym, FILE *fp);
bool symbol_type__is_a(char symbol_type, enum map_type map_type);
@@ -310,7 +327,7 @@ int setup_intlist(struct intlist **list, const char *list_str,
#ifdef HAVE_LIBELF_SUPPORT
bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
-void arch__elf_sym_adjust(GElf_Sym *sym);
+void arch__sym_update(struct symbol *s, GElf_Sym *sym);
#endif
#define SYMBOL_A 0
diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
new file mode 100644
index 000000000000..a680bdaa65dc
--- /dev/null
+++ b/tools/perf/util/symbol_fprintf.c
@@ -0,0 +1,71 @@
+#include <elf.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "symbol.h"
+
+size_t symbol__fprintf(struct symbol *sym, FILE *fp)
+{
+ return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
+ sym->start, sym->end,
+ sym->binding == STB_GLOBAL ? 'g' :
+ sym->binding == STB_LOCAL ? 'l' : 'w',
+ sym->name);
+}
+
+size_t __symbol__fprintf_symname_offs(const struct symbol *sym,
+ const struct addr_location *al,
+ bool unknown_as_addr, FILE *fp)
+{
+ unsigned long offset;
+ size_t length;
+
+ if (sym && sym->name) {
+ length = fprintf(fp, "%s", sym->name);
+ if (al) {
+ if (al->addr < sym->end)
+ offset = al->addr - sym->start;
+ else
+ offset = al->addr - al->map->start - sym->start;
+ length += fprintf(fp, "+0x%lx", offset);
+ }
+ return length;
+ } else if (al && unknown_as_addr)
+ return fprintf(fp, "[%#" PRIx64 "]", al->addr);
+ else
+ return fprintf(fp, "[unknown]");
+}
+
+size_t symbol__fprintf_symname_offs(const struct symbol *sym,
+ const struct addr_location *al,
+ FILE *fp)
+{
+ return __symbol__fprintf_symname_offs(sym, al, false, fp);
+}
+
+size_t __symbol__fprintf_symname(const struct symbol *sym,
+ const struct addr_location *al,
+ bool unknown_as_addr, FILE *fp)
+{
+ return __symbol__fprintf_symname_offs(sym, al, unknown_as_addr, fp);
+}
+
+size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
+{
+ return __symbol__fprintf_symname_offs(sym, NULL, false, fp);
+}
+
+size_t dso__fprintf_symbols_by_name(struct dso *dso,
+ enum map_type type, FILE *fp)
+{
+ size_t ret = 0;
+ struct rb_node *nd;
+ struct symbol_name_rb_node *pos;
+
+ for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
+ pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
+ fprintf(fp, "%s\n", pos->sym.name);
+ }
+
+ return ret;
+}
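
The __symbol__fprintf_symname_offs() variant added here takes an extra unknown_as_addr flag: when the sample did not resolve to a symbol it prints the raw address in brackets rather than "[unknown]". A hypothetical caller, assuming an already filled-in struct addr_location:

#include <stdio.h>
#include "symbol.h"

static void print_sample_symbol(const struct addr_location *al)
{
	/* Resolved: "name+0x<offset>"; unresolved: "[<address>]". */
	__symbol__fprintf_symname_offs(al->sym, al, true, stdout);
	fputc('\n', stdout);
}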
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
new file mode 100644
index 000000000000..bbb4c1957578
--- /dev/null
+++ b/tools/perf/util/syscalltbl.c
@@ -0,0 +1,134 @@
+/*
+ * System call table mapper
+ *
+ * (C) 2016 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "syscalltbl.h"
+#include <stdlib.h>
+
+#ifdef HAVE_SYSCALL_TABLE
+#include <linux/compiler.h>
+#include <string.h>
+#include "util.h"
+
+#if defined(__x86_64__)
+#include <asm/syscalls_64.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_x86_64_MAX_ID;
+static const char **syscalltbl_native = syscalltbl_x86_64;
+#endif
+
+struct syscall {
+ int id;
+ const char *name;
+};
+
+static int syscallcmpname(const void *vkey, const void *ventry)
+{
+ const char *key = vkey;
+ const struct syscall *entry = ventry;
+
+ return strcmp(key, entry->name);
+}
+
+static int syscallcmp(const void *va, const void *vb)
+{
+ const struct syscall *a = va, *b = vb;
+
+ return strcmp(a->name, b->name);
+}
+
+static int syscalltbl__init_native(struct syscalltbl *tbl)
+{
+ int nr_entries = 0, i, j;
+ struct syscall *entries;
+
+ for (i = 0; i <= syscalltbl_native_max_id; ++i)
+ if (syscalltbl_native[i])
+ ++nr_entries;
+
+ entries = tbl->syscalls.entries = malloc(sizeof(struct syscall) * nr_entries);
+ if (tbl->syscalls.entries == NULL)
+ return -1;
+
+ for (i = 0, j = 0; i <= syscalltbl_native_max_id; ++i) {
+ if (syscalltbl_native[i]) {
+ entries[j].name = syscalltbl_native[i];
+ entries[j].id = i;
+ ++j;
+ }
+ }
+
+ qsort(tbl->syscalls.entries, nr_entries, sizeof(struct syscall), syscallcmp);
+ tbl->syscalls.nr_entries = nr_entries;
+ return 0;
+}
+
+struct syscalltbl *syscalltbl__new(void)
+{
+ struct syscalltbl *tbl = malloc(sizeof(*tbl));
+ if (tbl) {
+ if (syscalltbl__init_native(tbl)) {
+ free(tbl);
+ return NULL;
+ }
+ }
+ return tbl;
+}
+
+void syscalltbl__delete(struct syscalltbl *tbl)
+{
+ zfree(&tbl->syscalls.entries);
+ free(tbl);
+}
+
+const char *syscalltbl__name(const struct syscalltbl *tbl __maybe_unused, int id)
+{
+ return id <= syscalltbl_native_max_id ? syscalltbl_native[id]: NULL;
+}
+
+int syscalltbl__id(struct syscalltbl *tbl, const char *name)
+{
+ struct syscall *sc = bsearch(name, tbl->syscalls.entries,
+ tbl->syscalls.nr_entries, sizeof(*sc),
+ syscallcmpname);
+
+ return sc ? sc->id : -1;
+}
+
+#else /* HAVE_SYSCALL_TABLE */
+
+#include <libaudit.h>
+
+struct syscalltbl *syscalltbl__new(void)
+{
+ struct syscalltbl *tbl = malloc(sizeof(*tbl));
+ if (tbl)
+ tbl->audit_machine = audit_detect_machine();
+ return tbl;
+}
+
+void syscalltbl__delete(struct syscalltbl *tbl)
+{
+ free(tbl);
+}
+
+const char *syscalltbl__name(const struct syscalltbl *tbl, int id)
+{
+ return audit_syscall_to_name(id, tbl->audit_machine);
+}
+
+int syscalltbl__id(struct syscalltbl *tbl, const char *name)
+{
+ return audit_name_to_syscall(name, tbl->audit_machine);
+}
+#endif /* HAVE_SYSCALL_TABLE */
diff --git a/tools/perf/util/syscalltbl.h b/tools/perf/util/syscalltbl.h
new file mode 100644
index 000000000000..e2951510484f
--- /dev/null
+++ b/tools/perf/util/syscalltbl.h
@@ -0,0 +1,20 @@
+#ifndef __PERF_SYSCALLTBL_H
+#define __PERF_SYSCALLTBL_H
+
+struct syscalltbl {
+ union {
+ int audit_machine;
+ struct {
+ int nr_entries;
+ void *entries;
+ } syscalls;
+ };
+};
+
+struct syscalltbl *syscalltbl__new(void);
+void syscalltbl__delete(struct syscalltbl *tbl);
+
+const char *syscalltbl__name(const struct syscalltbl *tbl, int id);
+int syscalltbl__id(struct syscalltbl *tbl, const char *name);
+
+#endif /* __PERF_SYSCALLTBL_H */
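
Together the two files above let callers map between syscall names and ids without caring whether the table comes from the generated native table or from libaudit. A short usage sketch (hypothetical caller):

#include <stdio.h>
#include "syscalltbl.h"

static void print_openat_id(void)
{
	struct syscalltbl *tbl = syscalltbl__new();
	int id;

	if (tbl == NULL)
		return;

	id = syscalltbl__id(tbl, "openat");	/* returns -1 when the name is unknown */
	if (id >= 0)
		printf("openat is syscall %d (%s)\n", id, syscalltbl__name(tbl, id));

	syscalltbl__delete(tbl);
}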
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
index 679688e70ae7..825086aa9a08 100644
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -22,44 +22,9 @@
#include "debug.h"
#include "symbol.h"
#include "comm.h"
+#include "call-path.h"
#include "thread-stack.h"
-#define CALL_PATH_BLOCK_SHIFT 8
-#define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT)
-#define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1)
-
-struct call_path_block {
- struct call_path cp[CALL_PATH_BLOCK_SIZE];
- struct list_head node;
-};
-
-/**
- * struct call_path_root - root of all call paths.
- * @call_path: root call path
- * @blocks: list of blocks to store call paths
- * @next: next free space
- * @sz: number of spaces
- */
-struct call_path_root {
- struct call_path call_path;
- struct list_head blocks;
- size_t next;
- size_t sz;
-};
-
-/**
- * struct call_return_processor - provides a call-back to consume call-return
- * information.
- * @cpr: call path root
- * @process: call-back that accepts call/return information
- * @data: anonymous data for call-back
- */
-struct call_return_processor {
- struct call_path_root *cpr;
- int (*process)(struct call_return *cr, void *data);
- void *data;
-};
-
#define STACK_GROWTH 2048
/**
@@ -335,108 +300,6 @@ void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr;
}
-static void call_path__init(struct call_path *cp, struct call_path *parent,
- struct symbol *sym, u64 ip, bool in_kernel)
-{
- cp->parent = parent;
- cp->sym = sym;
- cp->ip = sym ? 0 : ip;
- cp->db_id = 0;
- cp->in_kernel = in_kernel;
- RB_CLEAR_NODE(&cp->rb_node);
- cp->children = RB_ROOT;
-}
-
-static struct call_path_root *call_path_root__new(void)
-{
- struct call_path_root *cpr;
-
- cpr = zalloc(sizeof(struct call_path_root));
- if (!cpr)
- return NULL;
- call_path__init(&cpr->call_path, NULL, NULL, 0, false);
- INIT_LIST_HEAD(&cpr->blocks);
- return cpr;
-}
-
-static void call_path_root__free(struct call_path_root *cpr)
-{
- struct call_path_block *pos, *n;
-
- list_for_each_entry_safe(pos, n, &cpr->blocks, node) {
- list_del(&pos->node);
- free(pos);
- }
- free(cpr);
-}
-
-static struct call_path *call_path__new(struct call_path_root *cpr,
- struct call_path *parent,
- struct symbol *sym, u64 ip,
- bool in_kernel)
-{
- struct call_path_block *cpb;
- struct call_path *cp;
- size_t n;
-
- if (cpr->next < cpr->sz) {
- cpb = list_last_entry(&cpr->blocks, struct call_path_block,
- node);
- } else {
- cpb = zalloc(sizeof(struct call_path_block));
- if (!cpb)
- return NULL;
- list_add_tail(&cpb->node, &cpr->blocks);
- cpr->sz += CALL_PATH_BLOCK_SIZE;
- }
-
- n = cpr->next++ & CALL_PATH_BLOCK_MASK;
- cp = &cpb->cp[n];
-
- call_path__init(cp, parent, sym, ip, in_kernel);
-
- return cp;
-}
-
-static struct call_path *call_path__findnew(struct call_path_root *cpr,
- struct call_path *parent,
- struct symbol *sym, u64 ip, u64 ks)
-{
- struct rb_node **p;
- struct rb_node *node_parent = NULL;
- struct call_path *cp;
- bool in_kernel = ip >= ks;
-
- if (sym)
- ip = 0;
-
- if (!parent)
- return call_path__new(cpr, parent, sym, ip, in_kernel);
-
- p = &parent->children.rb_node;
- while (*p != NULL) {
- node_parent = *p;
- cp = rb_entry(node_parent, struct call_path, rb_node);
-
- if (cp->sym == sym && cp->ip == ip)
- return cp;
-
- if (sym < cp->sym || (sym == cp->sym && ip < cp->ip))
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- cp = call_path__new(cpr, parent, sym, ip, in_kernel);
- if (!cp)
- return NULL;
-
- rb_link_node(&cp->rb_node, node_parent, p);
- rb_insert_color(&cp->rb_node, &parent->children);
-
- return cp;
-}
-
struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, void *data),
void *data)
diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h
index e1528f1374c3..ad44c7944b8e 100644
--- a/tools/perf/util/thread-stack.h
+++ b/tools/perf/util/thread-stack.h
@@ -19,17 +19,16 @@
#include <sys/types.h>
#include <linux/types.h>
-#include <linux/rbtree.h>
struct thread;
struct comm;
struct ip_callchain;
struct symbol;
struct dso;
-struct call_return_processor;
struct comm;
struct perf_sample;
struct addr_location;
+struct call_path;
/*
* Call/Return flags.
@@ -69,26 +68,16 @@ struct call_return {
};
/**
- * struct call_path - node in list of calls leading to a function call.
- * @parent: call path to the parent function call
- * @sym: symbol of function called
- * @ip: only if sym is null, the ip of the function
- * @db_id: id used for db-export
- * @in_kernel: whether function is a in the kernel
- * @rb_node: node in parent's tree of called functions
- * @children: tree of call paths of functions called
- *
- * In combination with the call_return structure, the call_path structure
- * defines a context-sensitve call-graph.
+ * struct call_return_processor - provides a call-back to consume call-return
+ * information.
+ * @cpr: call path root
+ * @process: call-back that accepts call/return information
+ * @data: anonymous data for call-back
*/
-struct call_path {
- struct call_path *parent;
- struct symbol *sym;
- u64 ip;
- u64 db_id;
- bool in_kernel;
- struct rb_node rb_node;
- struct rb_root children;
+struct call_return_processor {
+ struct call_path_root *cpr;
+ int (*process)(struct call_return *cr, void *data);
+ void *data;
};
int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index dfd00c6dad6e..45fcb715a36b 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -10,6 +10,8 @@
#include "comm.h"
#include "unwind.h"
+#include <api/fs/fs.h>
+
int thread__init_map_groups(struct thread *thread, struct machine *machine)
{
struct thread *leader;
@@ -153,6 +155,23 @@ int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
return 0;
}
+int thread__set_comm_from_proc(struct thread *thread)
+{
+ char path[64];
+ char *comm = NULL;
+ size_t sz;
+ int err = -1;
+
+ if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
+ thread->pid_, thread->tid) >= (int)sizeof(path)) &&
+ procfs__read_str(path, &comm, &sz) == 0) {
+ comm[sz - 1] = '\0';
+ err = thread__set_comm(thread, comm, 0);
+ }
+
+ return err;
+}
+
const char *thread__comm_str(const struct thread *thread)
{
const struct comm *comm = thread__comm(thread);
@@ -233,7 +252,7 @@ void thread__find_cpumode_addr_location(struct thread *thread,
struct addr_location *al)
{
size_t i;
- const u8 const cpumodes[] = {
+ const u8 cpumodes[] = {
PERF_RECORD_MISC_USER,
PERF_RECORD_MISC_KERNEL,
PERF_RECORD_MISC_GUEST_USER,
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index a0ac0317affb..45fba13c800b 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -9,6 +9,9 @@
#include "symbol.h"
#include <strlist.h>
#include <intlist.h>
+#ifdef HAVE_LIBUNWIND_SUPPORT
+#include <libunwind.h>
+#endif
struct thread_stack;
@@ -32,6 +35,9 @@ struct thread {
void *priv;
struct thread_stack *ts;
+#ifdef HAVE_LIBUNWIND_SUPPORT
+ unw_addr_space_t addr_space;
+#endif
};
struct machine;
@@ -65,6 +71,8 @@ static inline int thread__set_comm(struct thread *thread, const char *comm,
return __thread__set_comm(thread, comm, timestamp, false);
}
+int thread__set_comm_from_proc(struct thread *thread);
+
int thread__comm_len(struct thread *thread);
struct comm *thread__comm(const struct thread *thread);
struct comm *thread__exec_comm(const struct thread *thread);
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index 267112b4e3db..5654fe15e036 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -260,7 +260,7 @@ struct thread_map *thread_map__new_dummy(void)
return threads;
}
-static struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
+struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
{
struct thread_map *threads = NULL, *nt;
int ntasks = 0;
@@ -436,3 +436,15 @@ struct thread_map *thread_map__new_event(struct thread_map_event *event)
return threads;
}
+
+bool thread_map__has(struct thread_map *threads, pid_t pid)
+{
+ int i;
+
+ for (i = 0; i < threads->nr; ++i) {
+ if (threads->map[i].pid == pid)
+ return true;
+ }
+
+ return false;
+}
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h
index 85e4c7c4fbde..bd3b971588da 100644
--- a/tools/perf/util/thread_map.h
+++ b/tools/perf/util/thread_map.h
@@ -31,6 +31,8 @@ void thread_map__put(struct thread_map *map);
struct thread_map *thread_map__new_str(const char *pid,
const char *tid, uid_t uid);
+struct thread_map *thread_map__new_by_tid_str(const char *tid_str);
+
size_t thread_map__fprintf(struct thread_map *threads, FILE *fp);
static inline int thread_map__nr(struct thread_map *threads)
@@ -55,4 +57,5 @@ static inline char *thread_map__comm(struct thread_map *map, int thread)
}
void thread_map__read_comms(struct thread_map *threads);
+bool thread_map__has(struct thread_map *threads, pid_t pid);
#endif /* __PERF_THREAD_MAP_H */
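
thread_map__has() is a plain linear scan over the map's pid entries; a tiny illustrative use (hypothetical, not taken from this series):

#include <stdbool.h>
#include "thread_map.h"

/* Only act on threads that are part of the monitored set. */
static bool is_monitored(struct thread_map *threads, pid_t tid)
{
	return threads && thread_map__has(threads, tid);
}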
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index 55de4cffcd4e..ac2590a3de2d 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -57,6 +57,7 @@ struct perf_tool {
id_index,
auxtrace_info,
auxtrace_error,
+ time_conv,
thread_map,
cpu_map,
stat_config,
diff --git a/tools/perf/util/trigger.h b/tools/perf/util/trigger.h
new file mode 100644
index 000000000000..e97d7016d771
--- /dev/null
+++ b/tools/perf/util/trigger.h
@@ -0,0 +1,94 @@
+#ifndef __TRIGGER_H_
+#define __TRIGGER_H_ 1
+
+#include "util/debug.h"
+#include "asm/bug.h"
+
+/*
+ * Use trigger to model operations which need to be executed when
+ * an event (a signal, for example) is observed.
+ *
+ * States and transitions:
+ *
+ *
+ * OFF--(on)--> READY --(hit)--> HIT
+ * ^ |
+ * | (ready)
+ * | |
+ * \_____________/
+ *
+ * is_hit and is_ready are the two key functions for querying the state of
+ * a trigger: is_hit means the event has already happened; is_ready means
+ * the trigger is waiting for the event.
+ */
+
+struct trigger {
+ volatile enum {
+ TRIGGER_ERROR = -2,
+ TRIGGER_OFF = -1,
+ TRIGGER_READY = 0,
+ TRIGGER_HIT = 1,
+ } state;
+ const char *name;
+};
+
+#define TRIGGER_WARN_ONCE(t, exp) \
+	WARN_ONCE(t->state != exp, "trigger '%s' state transition error: %d in %s()\n", \
+ t->name, t->state, __func__)
+
+static inline bool trigger_is_available(struct trigger *t)
+{
+ return t->state >= 0;
+}
+
+static inline bool trigger_is_error(struct trigger *t)
+{
+ return t->state <= TRIGGER_ERROR;
+}
+
+static inline void trigger_on(struct trigger *t)
+{
+ TRIGGER_WARN_ONCE(t, TRIGGER_OFF);
+ t->state = TRIGGER_READY;
+}
+
+static inline void trigger_ready(struct trigger *t)
+{
+ if (!trigger_is_available(t))
+ return;
+ t->state = TRIGGER_READY;
+}
+
+static inline void trigger_hit(struct trigger *t)
+{
+ if (!trigger_is_available(t))
+ return;
+ TRIGGER_WARN_ONCE(t, TRIGGER_READY);
+ t->state = TRIGGER_HIT;
+}
+
+static inline void trigger_off(struct trigger *t)
+{
+ if (!trigger_is_available(t))
+ return;
+ t->state = TRIGGER_OFF;
+}
+
+static inline void trigger_error(struct trigger *t)
+{
+ t->state = TRIGGER_ERROR;
+}
+
+static inline bool trigger_is_ready(struct trigger *t)
+{
+ return t->state == TRIGGER_READY;
+}
+
+static inline bool trigger_is_hit(struct trigger *t)
+{
+ return t->state == TRIGGER_HIT;
+}
+
+#define DEFINE_TRIGGER(n) \
+struct trigger n = {.state = TRIGGER_OFF, .name = #n}
+#endif
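
The intended pattern is that a main loop arms a trigger, an asynchronous event (a signal handler, for example) marks it hit, and the loop consumes the hit and re-arms it. A sketch under those assumptions; the signal wiring, done flag and take_snapshot() consumer below are illustrative, not part of this patch:

#include <signal.h>
#include "util/trigger.h"

static DEFINE_TRIGGER(snapshot_trigger);
static volatile int done;

static void take_snapshot(void) { /* hypothetical consumer of the hit */ }

static void sigusr2_handler(int sig)
{
	(void)sig;
	trigger_hit(&snapshot_trigger);		/* ignored while the trigger is off */
}

static void record_loop(void)
{
	signal(SIGUSR2, sigusr2_handler);
	trigger_on(&snapshot_trigger);		/* OFF -> READY */

	while (!done) {
		if (trigger_is_hit(&snapshot_trigger)) {
			take_snapshot();
			trigger_ready(&snapshot_trigger);	/* HIT -> READY, re-arm */
		}
	}

	trigger_off(&snapshot_trigger);
}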
diff --git a/tools/perf/util/tsc.h b/tools/perf/util/tsc.h
index a8b78f1b3243..d5b11e2b85e0 100644
--- a/tools/perf/util/tsc.h
+++ b/tools/perf/util/tsc.h
@@ -3,10 +3,29 @@
#include <linux/types.h>
-#include "../arch/x86/util/tsc.h"
+#include "event.h"
+
+struct perf_tsc_conversion {
+ u16 time_shift;
+ u32 time_mult;
+ u64 time_zero;
+};
+struct perf_event_mmap_page;
+
+int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
+ struct perf_tsc_conversion *tc);
u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
u64 rdtsc(void);
+struct perf_event_mmap_page;
+struct perf_tool;
+struct machine;
+
+int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc,
+ struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine);
+
#endif
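
perf_tsc_conversion carries the time_shift/time_mult/time_zero parameters exported via the perf_event mmap page, and converting between TSC cycles and perf time is the usual fixed-point multiply-and-shift. A hedged sketch of that arithmetic (the in-tree helpers may differ in detail):

/* Sketch of cycles -> perf time, assuming the struct fields above. */
static u64 example_tsc_to_perf_time(u64 cyc, const struct perf_tsc_conversion *tc)
{
	u64 quot = cyc >> tc->time_shift;
	u64 rem  = cyc & (((u64)1 << tc->time_shift) - 1);

	return tc->time_zero + quot * tc->time_mult +
	       ((rem * tc->time_mult) >> tc->time_shift);
}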
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index ee7e372297e5..63687d3a344e 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -32,6 +32,7 @@
#include "symbol.h"
#include "util.h"
#include "debug.h"
+#include "asm/bug.h"
extern int
UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
@@ -580,43 +581,33 @@ static unw_accessors_t accessors = {
int unwind__prepare_access(struct thread *thread)
{
- unw_addr_space_t addr_space;
-
if (callchain_param.record_mode != CALLCHAIN_DWARF)
return 0;
- addr_space = unw_create_addr_space(&accessors, 0);
- if (!addr_space) {
+ thread->addr_space = unw_create_addr_space(&accessors, 0);
+ if (!thread->addr_space) {
pr_err("unwind: Can't create unwind address space.\n");
return -ENOMEM;
}
- unw_set_caching_policy(addr_space, UNW_CACHE_GLOBAL);
- thread__set_priv(thread, addr_space);
-
+ unw_set_caching_policy(thread->addr_space, UNW_CACHE_GLOBAL);
return 0;
}
void unwind__flush_access(struct thread *thread)
{
- unw_addr_space_t addr_space;
-
if (callchain_param.record_mode != CALLCHAIN_DWARF)
return;
- addr_space = thread__priv(thread);
- unw_flush_cache(addr_space, 0, 0);
+ unw_flush_cache(thread->addr_space, 0, 0);
}
void unwind__finish_access(struct thread *thread)
{
- unw_addr_space_t addr_space;
-
if (callchain_param.record_mode != CALLCHAIN_DWARF)
return;
- addr_space = thread__priv(thread);
- unw_destroy_addr_space(addr_space);
+ unw_destroy_addr_space(thread->addr_space);
}
static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
@@ -639,7 +630,9 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
* unwind itself.
*/
if (max_stack - 1 > 0) {
- addr_space = thread__priv(ui->thread);
+ WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
+ addr_space = ui->thread->addr_space;
+
if (addr_space == NULL)
return -1;
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index b7766c577b01..eab077ad6ca9 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -33,6 +33,8 @@ struct callchain_param callchain_param = {
unsigned int page_size;
int cacheline_size;
+unsigned int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+
bool test_attr__enabled;
bool perf_host = true;
@@ -117,6 +119,40 @@ int rm_rf(char *path)
return rmdir(path);
}
+/* A filter which removes dot files */
+bool lsdir_no_dot_filter(const char *name __maybe_unused, struct dirent *d)
+{
+ return d->d_name[0] != '.';
+}
+
+/* lsdir reads a directory and stores its entries in a strlist */
+struct strlist *lsdir(const char *name,
+ bool (*filter)(const char *, struct dirent *))
+{
+ struct strlist *list = NULL;
+ DIR *dir;
+ struct dirent *d;
+
+ dir = opendir(name);
+ if (!dir)
+ return NULL;
+
+ list = strlist__new(NULL, NULL);
+ if (!list) {
+ errno = ENOMEM;
+ goto out;
+ }
+
+ while ((d = readdir(dir)) != NULL) {
+ if (!filter || filter(name, d))
+ strlist__add(list, d->d_name);
+ }
+
+out:
+ closedir(dir);
+ return list;
+}
+
static int slow_copyfile(const char *from, const char *to)
{
int err = -1;
@@ -471,7 +507,6 @@ int parse_callchain_record(const char *arg, struct callchain_param *param)
"needed for --call-graph fp\n");
break;
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
/* Dwarf style */
} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
const unsigned long default_stack_dump_size = 8192;
@@ -487,7 +522,6 @@ int parse_callchain_record(const char *arg, struct callchain_param *param)
ret = get_stack_size(tok, &size);
param->dump_size = size;
}
-#endif /* HAVE_DWARF_UNWIND_SUPPORT */
} else if (!strncmp(name, "lbr", sizeof("lbr"))) {
if (!strtok_r(NULL, ",", &saveptr)) {
param->record_mode = CALLCHAIN_LBR;
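
lsdir() returns a strlist of the directory entries accepted by the filter (all entries when filter is NULL), and lsdir_no_dot_filter() is the stock filter that skips dot files. A small hypothetical caller, using the strlist iteration seen elsewhere in this series:

#include <stdio.h>
#include "util.h"
#include "strlist.h"

static void list_visible_entries(const char *dir)
{
	struct strlist *entries = lsdir(dir, lsdir_no_dot_filter);
	struct str_node *pos;

	if (entries == NULL)
		return;

	strlist__for_each(pos, entries)
		printf("%s\n", pos->s);

	strlist__delete(entries);
}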
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 8298d607c738..7651633a8dc7 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -79,6 +79,7 @@
#include <termios.h>
#include <linux/bitops.h>
#include <termios.h>
+#include "strlist.h"
extern const char *graph_line;
extern const char *graph_dotted_line;
@@ -159,12 +160,6 @@ static inline char *gitstrchrnul(const char *s, int c)
}
#endif
-/*
- * Wrappers:
- */
-void *xrealloc(void *ptr, size_t size) __attribute__((weak));
-
-
static inline void *zalloc(size_t size)
{
return calloc(1, size);
@@ -222,6 +217,8 @@ static inline int sane_case(int x, int high)
int mkdir_p(char *path, mode_t mode);
int rm_rf(char *path);
+struct strlist *lsdir(const char *name, bool (*filter)(const char *, struct dirent *));
+bool lsdir_no_dot_filter(const char *name, struct dirent *d);
int copyfile(const char *from, const char *to);
int copyfile_mode(const char *from, const char *to, mode_t mode);
int copyfile_offset(int fromfd, loff_t from_ofs, int tofd, loff_t to_ofs, u64 size);
@@ -254,11 +251,17 @@ int hex2u64(const char *ptr, u64 *val);
char *ltrim(char *s);
char *rtrim(char *s);
+static inline char *trim(char *s)
+{
+ return ltrim(rtrim(s));
+}
+
void dump_stack(void);
void sighandler_dump_stack(int sig);
extern unsigned int page_size;
extern int cacheline_size;
+extern unsigned int sysctl_perf_event_max_stack;
struct parse_tag {
char tag;
diff --git a/tools/perf/util/wrapper.c b/tools/perf/util/wrapper.c
deleted file mode 100644
index 5f1a07c4b87b..000000000000
--- a/tools/perf/util/wrapper.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Various trivial helper wrappers around standard functions
- */
-#include "cache.h"
-
-/*
- * There's no pack memory to release - but stay close to the Git
- * version so wrap this away:
- */
-static inline void release_pack_memory(size_t size __maybe_unused,
- int flag __maybe_unused)
-{
-}
-
-void *xrealloc(void *ptr, size_t size)
-{
- void *ret = realloc(ptr, size);
- if (!ret && !size)
- ret = realloc(ptr, 1);
- if (!ret) {
- release_pack_memory(size, -1);
- ret = realloc(ptr, size);
- if (!ret && !size)
- ret = realloc(ptr, 1);
- if (!ret)
- die("Out of memory, realloc failed");
- }
- return ret;
-}