Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/Build | 3
-rw-r--r--  tools/perf/util/annotate.c | 38
-rw-r--r--  tools/perf/util/annotate.h | 4
-rw-r--r--  tools/perf/util/arm-spe.c | 1
-rw-r--r--  tools/perf/util/auxtrace.c | 12
-rw-r--r--  tools/perf/util/auxtrace.h | 26
-rw-r--r--  tools/perf/util/bpf-event.c | 1
-rw-r--r--  tools/perf/util/bpf-event.h | 15
-rw-r--r--  tools/perf/util/bpf-loader.c | 2
-rw-r--r--  tools/perf/util/branch.c | 2
-rw-r--r--  tools/perf/util/branch.h | 9
-rw-r--r--  tools/perf/util/build-id.c | 3
-rw-r--r--  tools/perf/util/callchain.c | 1
-rw-r--r--  tools/perf/util/callchain.h | 5
-rw-r--r--  tools/perf/util/cloexec.c | 2
-rw-r--r--  tools/perf/util/copyfile.c | 144
-rw-r--r--  tools/perf/util/copyfile.h | 16
-rw-r--r--  tools/perf/util/cs-etm-decoder/cs-etm-decoder.c | 1
-rw-r--r--  tools/perf/util/cs-etm.c | 4
-rw-r--r--  tools/perf/util/data-convert-bt.c | 5
-rw-r--r--  tools/perf/util/data.c | 3
-rw-r--r--  tools/perf/util/debug.c | 1
-rw-r--r--  tools/perf/util/debug.h | 2
-rw-r--r--  tools/perf/util/demangle-java.c | 1
-rw-r--r--  tools/perf/util/demangle-rust.c | 1
-rw-r--r--  tools/perf/util/dwarf-regs.c | 1
-rw-r--r--  tools/perf/util/env.h | 3
-rw-r--r--  tools/perf/util/event.c | 1109
-rw-r--r--  tools/perf/util/event.h | 77
-rw-r--r--  tools/perf/util/evlist.c | 295
-rw-r--r--  tools/perf/util/evlist.h | 81
-rw-r--r--  tools/perf/util/evsel.c | 487
-rw-r--r--  tools/perf/util/evsel.h | 126
-rw-r--r--  tools/perf/util/evsel_config.h | 50
-rw-r--r--  tools/perf/util/evsel_fprintf.c | 16
-rw-r--r--  tools/perf/util/evsel_fprintf.h | 50
-rw-r--r--  tools/perf/util/genelf.h | 3
-rw-r--r--  tools/perf/util/header.c | 424
-rw-r--r--  tools/perf/util/header.h | 60
-rw-r--r--  tools/perf/util/hist.h | 1
-rw-r--r--  tools/perf/util/intel-bts.c | 6
-rw-r--r--  tools/perf/util/intel-pt.c | 11
-rw-r--r--  tools/perf/util/jitdump.c | 10
-rw-r--r--  tools/perf/util/kvm-stat.h | 4
-rw-r--r--  tools/perf/util/libunwind/arm64.c | 1
-rw-r--r--  tools/perf/util/libunwind/x86_32.c | 1
-rw-r--r--  tools/perf/util/llvm-utils.c | 7
-rw-r--r--  tools/perf/util/lzma.c | 2
-rw-r--r--  tools/perf/util/machine.c | 16
-rw-r--r--  tools/perf/util/machine.h | 15
-rw-r--r--  tools/perf/util/map.c | 3
-rw-r--r--  tools/perf/util/memswap.h | 7
-rw-r--r--  tools/perf/util/mmap.c | 185
-rw-r--r--  tools/perf/util/mmap.h | 77
-rw-r--r--  tools/perf/util/namespaces.c | 18
-rw-r--r--  tools/perf/util/namespaces.h | 2
-rw-r--r--  tools/perf/util/parse-events.c | 9
-rw-r--r--  tools/perf/util/parse-events.y | 4
-rw-r--r--  tools/perf/util/perf-hooks.c | 1
-rw-r--r--  tools/perf/util/perf_event_attr_fprintf.c | 148
-rw-r--r--  tools/perf/util/pmu.c | 1
-rw-r--r--  tools/perf/util/probe-event.c | 1
-rw-r--r--  tools/perf/util/probe-file.c | 1
-rw-r--r--  tools/perf/util/probe-finder.c | 19
-rw-r--r--  tools/perf/util/python-ext-sources | 1
-rw-r--r--  tools/perf/util/python.c | 34
-rw-r--r--  tools/perf/util/record.c | 8
-rw-r--r--  tools/perf/util/rwsem.c | 1
-rw-r--r--  tools/perf/util/s390-cpumsf.c | 1
-rw-r--r--  tools/perf/util/s390-sample-raw.c | 1
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 2
-rw-r--r--  tools/perf/util/session.c | 92
-rw-r--r--  tools/perf/util/session.h | 5
-rw-r--r--  tools/perf/util/sort.c | 2
-rw-r--r--  tools/perf/util/srccode.c | 2
-rw-r--r--  tools/perf/util/stat-shadow.c | 4
-rw-r--r--  tools/perf/util/stat.c | 62
-rw-r--r--  tools/perf/util/stat.h | 9
-rw-r--r--  tools/perf/util/svghelper.c | 2
-rw-r--r--  tools/perf/util/symbol-elf.c | 5
-rw-r--r--  tools/perf/util/symbol-minimal.c | 3
-rw-r--r--  tools/perf/util/symbol.c | 2
-rw-r--r--  tools/perf/util/synthetic-events.c | 1884
-rw-r--r--  tools/perf/util/synthetic-events.h | 103
-rw-r--r--  tools/perf/util/target.c | 2
-rw-r--r--  tools/perf/util/top.c | 3
-rw-r--r--  tools/perf/util/trace-event-info.c | 2
-rw-r--r--  tools/perf/util/trace-event-read.c | 1
-rw-r--r--  tools/perf/util/trace-event.c | 1
-rw-r--r--  tools/perf/util/tsc.h | 14
-rw-r--r--  tools/perf/util/unwind-libdw.c | 1
-rw-r--r--  tools/perf/util/unwind-libunwind-local.c | 1
-rw-r--r--  tools/perf/util/usage.c | 1
-rw-r--r--  tools/perf/util/util.c | 136
-rw-r--r--  tools/perf/util/util.h | 8
-rw-r--r--  tools/perf/util/vdso.c | 2
-rw-r--r--  tools/perf/util/zlib.c | 4
97 files changed, 2965 insertions, 3068 deletions
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 0b4d8e0d474c..8dcfca1a882f 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -3,6 +3,7 @@ perf-y += block-range.o
perf-y += build-id.o
perf-y += cacheline.o
perf-y += config.o
+perf-y += copyfile.o
perf-y += ctype.o
perf-y += db-export.o
perf-y += env.o
@@ -10,6 +11,7 @@ perf-y += event.o
perf-y += evlist.o
perf-y += evsel.o
perf-y += evsel_fprintf.o
+perf-y += perf_event_attr_fprintf.o
perf-y += evswitch.o
perf-y += find_bit.o
perf-y += get_current_dir_name.o
@@ -86,6 +88,7 @@ perf-y += stat-display.o
perf-y += record.o
perf-y += srcline.o
perf-y += srccode.o
+perf-y += synthetic-events.o
perf-y += data.o
perf-y += tsc.o
perf-y += cloexec.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 1748f528b6e9..4036c7f7b0fb 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -14,7 +14,7 @@
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
-#include "util.h"
+#include "util.h" // hex_width()
#include "ui/ui.h"
#include "sort.h"
#include "build-id.h"
@@ -34,6 +34,7 @@
#include "bpf-event.h"
#include "block-range.h"
#include "string2.h"
+#include "util/event.h"
#include "arch/common.h"
#include <regex.h>
#include <pthread.h>
@@ -1630,6 +1631,19 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *
case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
break;
+ case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP:
+ scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions.");
+ break;
+ case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING:
+ scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization.");
+ break;
+ case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE:
+ scnprintf(buf, buflen, "Invalid BPF file: %s.", dso->long_name);
+ break;
+ case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF:
+ scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.",
+ dso->long_name);
+ break;
default:
scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
break;
@@ -1661,7 +1675,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
build_id_path = strdup(filename);
if (!build_id_path)
- return -1;
+ return ENOMEM;
/*
* old style build-id cache has name of XX/XXXXXXX.. while
@@ -1712,13 +1726,13 @@ static int symbol__disassemble_bpf(struct symbol *sym,
char tpath[PATH_MAX];
size_t buf_size;
int nr_skip = 0;
- int ret = -1;
char *buf;
bfd *bfdf;
+ int ret;
FILE *s;
if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
- return -1;
+ return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;
pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
sym->name, sym->start, sym->end - sym->start);
@@ -1731,8 +1745,10 @@ static int symbol__disassemble_bpf(struct symbol *sym,
assert(bfd_check_format(bfdf, bfd_object));
s = open_memstream(&buf, &buf_size);
- if (!s)
+ if (!s) {
+ ret = errno;
goto out;
+ }
init_disassemble_info(&info, s,
(fprintf_ftype) fprintf);
@@ -1741,8 +1757,10 @@ static int symbol__disassemble_bpf(struct symbol *sym,
info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
dso->bpf_prog.id);
- if (!info_node)
+ if (!info_node) {
+ ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
goto out;
+ }
info_linear = info_node->info_linear;
sub_id = dso->bpf_prog.sub_id;
@@ -2070,11 +2088,11 @@ int symbol__annotate(struct symbol *sym, struct map *map,
int err;
if (!arch_name)
- return -1;
+ return errno;
args.arch = arch = arch__find(arch_name);
if (arch == NULL)
- return -ENOTSUP;
+ return ENOTSUP;
if (parch)
*parch = arch;
@@ -2970,7 +2988,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct evsel *evsel,
notes->offsets = zalloc(size * sizeof(struct annotation_line *));
if (notes->offsets == NULL)
- return -1;
+ return ENOMEM;
if (perf_evsel__is_group_event(evsel))
nr_pcnt = evsel->core.nr_members;
@@ -2996,7 +3014,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct evsel *evsel,
out_free_offsets:
zfree(&notes->offsets);
- return -1;
+ return err;
}
#define ANNOTATION__CFG(n) \
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index d94be9140e31..d76fd0e81f46 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -370,6 +370,10 @@ enum symbol_disassemble_errno {
SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX = __SYMBOL_ANNOTATE_ERRNO__START,
SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF,
+ SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING,
+ SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP,
+ SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE,
+ SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF,
__SYMBOL_ANNOTATE_ERRNO__END,
};
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 8a7340f6a2a2..53be12b23ff4 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -16,7 +16,6 @@
#include <linux/log2.h>
#include <linux/zalloc.h>
-#include "cpumap.h"
#include "color.h"
#include "evsel.h"
#include "machine.h"
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 6f25224a3def..8470dfe9fe97 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -31,8 +31,8 @@
#include "map.h"
#include "pmu.h"
#include "evsel.h"
-#include "cpumap.h"
#include "symbol.h"
+#include "util/synthetic-events.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"
@@ -50,10 +50,12 @@
#include "intel-bts.h"
#include "arm-spe.h"
#include "s390-cpumsf.h"
-#include "util.h"
+#include "util/mmap.h"
#include <linux/ctype.h>
+#include <linux/kernel.h>
#include "symbol/kallsyms.h"
+#include <internal/lib.h>
static bool auxtrace__dont_decode(struct perf_session *session)
{
@@ -1226,7 +1228,7 @@ int perf_event__process_auxtrace_error(struct perf_session *session,
return 0;
}
-static int __auxtrace_mmap__read(struct perf_mmap *map,
+static int __auxtrace_mmap__read(struct mmap *map,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
bool snapshot, size_t snapshot_size)
@@ -1337,13 +1339,13 @@ static int __auxtrace_mmap__read(struct perf_mmap *map,
return 1;
}
-int auxtrace_mmap__read(struct perf_mmap *map, struct auxtrace_record *itr,
+int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn)
{
return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
}
-int auxtrace_mmap__read_snapshot(struct perf_mmap *map,
+int auxtrace_mmap__read_snapshot(struct mmap *map,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size)
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 37e70dc01436..f201f36bc35f 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -11,21 +11,22 @@
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
+#include <stdio.h> // FILE
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <asm/bitsperlong.h>
#include <asm/barrier.h>
-#include "event.h"
-
union perf_event;
struct perf_session;
struct evlist;
struct perf_tool;
-struct perf_mmap;
+struct mmap;
+struct perf_sample;
struct option;
struct record_opts;
+struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;
@@ -444,14 +445,14 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
bool per_cpu);
typedef int (*process_auxtrace_t)(struct perf_tool *tool,
- struct perf_mmap *map,
+ struct mmap *map,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2);
-int auxtrace_mmap__read(struct perf_mmap *map, struct auxtrace_record *itr,
+int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn);
-int auxtrace_mmap__read_snapshot(struct perf_mmap *map,
+int auxtrace_mmap__read_snapshot(struct mmap *map,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size);
@@ -524,10 +525,6 @@ void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
const char *msg, u64 timestamp);
-int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
- struct perf_tool *tool,
- struct perf_session *session,
- perf_event__handler_t process);
int perf_event__process_auxtrace_info(struct perf_session *session,
union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
@@ -604,15 +601,6 @@ void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}
-static inline int
-perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused,
- struct perf_tool *tool __maybe_unused,
- struct perf_session *session __maybe_unused,
- perf_event__handler_t process __maybe_unused)
-{
- return -EINVAL;
-}
-
static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
struct evlist *evlist __maybe_unused,
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 7a3d4b125323..f7ed5d122e22 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -16,6 +16,7 @@
#include "map.h"
#include "evlist.h"
#include "record.h"
+#include "util/synthetic-events.h"
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
index a01c2fd68c03..81fdc88e6c1a 100644
--- a/tools/perf/util/bpf-event.h
+++ b/tools/perf/util/bpf-event.h
@@ -6,9 +6,9 @@
#include <linux/rbtree.h>
#include <pthread.h>
#include <api/fd/array.h>
-#include "event.h"
#include <stdio.h>
+struct bpf_prog_info;
struct machine;
union perf_event;
struct perf_env;
@@ -33,11 +33,6 @@ struct btf_node {
#ifdef HAVE_LIBBPF_SUPPORT
int machine__process_bpf(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
-
-int perf_event__synthesize_bpf_events(struct perf_session *session,
- perf_event__handler_t process,
- struct machine *machine,
- struct record_opts *opts);
int bpf_event__add_sb_event(struct evlist **evlist,
struct perf_env *env);
void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
@@ -51,14 +46,6 @@ static inline int machine__process_bpf(struct machine *machine __maybe_unused,
return 0;
}
-static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused,
- perf_event__handler_t process __maybe_unused,
- struct machine *machine __maybe_unused,
- struct record_opts *opts __maybe_unused)
-{
- return 0;
-}
-
static inline int bpf_event__add_sb_event(struct evlist **evlist __maybe_unused,
struct perf_env *env __maybe_unused)
{
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 37283e865352..10c187b8b8ea 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -1568,7 +1568,7 @@ struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
return ERR_PTR(-err);
}
- evsel = perf_evlist__last(evlist);
+ evsel = evlist__last(evlist);
}
bpf__for_each_map_named(map, obj, tmp, name) {
diff --git a/tools/perf/util/branch.c b/tools/perf/util/branch.c
index 9d1e090084a2..2285b1eb3128 100644
--- a/tools/perf/util/branch.c
+++ b/tools/perf/util/branch.c
@@ -1,5 +1,3 @@
-#include "util/util.h"
-#include "util/debug.h"
#include "util/map_symbol.h"
#include "util/branch.h"
#include <linux/kernel.h>
diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
index 06f66dad0b79..88e00d268f6f 100644
--- a/tools/perf/util/branch.h
+++ b/tools/perf/util/branch.h
@@ -1,8 +1,15 @@
#ifndef _PERF_BRANCH_H
#define _PERF_BRANCH_H 1
-
+/*
+ * The linux/stddef.h isn't needed here, but is needed for __always_inline used
+ * in files included from uapi/linux/perf_event.h such as
+ * /usr/include/linux/swab.h and /usr/include/linux/byteorder/little_endian.h,
+ * detected in at least musl libc, used in Alpine Linux. -acme
+ */
#include <stdio.h>
#include <stdint.h>
+#include <linux/compiler.h>
+#include <linux/stddef.h>
#include <linux/perf_event.h>
#include <linux/types.h>
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index e5fb77755d9e..c076fc7fe025 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -7,12 +7,13 @@
* Copyright (C) 2009, 2010 Red Hat Inc.
* Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
*/
-#include "util.h"
+#include "util.h" // lsdir(), mkdir_p(), rm_rf()
#include <dirent.h>
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include "util/copyfile.h"
#include "dso.h"
#include "build-id.h"
#include "event.h"
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index c14646c1f2eb..9a9b56ed3f0a 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -23,6 +23,7 @@
#include "debug.h"
#include "dso.h"
+#include "event.h"
#include "hist.h"
#include "sort.h"
#include "machine.h"
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index b042ceef4114..83398e5bbe4b 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -4,12 +4,15 @@
#include <linux/list.h>
#include <linux/rbtree.h>
-#include "event.h"
#include "map_symbol.h"
#include "branch.h"
+struct addr_location;
struct evsel;
+struct ip_callchain;
struct map;
+struct perf_sample;
+struct thread;
#define HELP_PAD "\t\t\t\t"
diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
index 4e904fcb2783..a12872f2856a 100644
--- a/tools/perf/util/cloexec.c
+++ b/tools/perf/util/cloexec.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <sched.h>
-#include "util.h"
+#include "util.h" // for sched_getcpu()
#include "../perf-sys.h"
#include "cloexec.h"
#include "event.h"
diff --git a/tools/perf/util/copyfile.c b/tools/perf/util/copyfile.c
new file mode 100644
index 000000000000..3fa0db136667
--- /dev/null
+++ b/tools/perf/util/copyfile.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/copyfile.h"
+#include "util/namespaces.h"
+#include <internal/lib.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+static int slow_copyfile(const char *from, const char *to, struct nsinfo *nsi)
+{
+ int err = -1;
+ char *line = NULL;
+ size_t n;
+ FILE *from_fp, *to_fp;
+ struct nscookie nsc;
+
+ nsinfo__mountns_enter(nsi, &nsc);
+ from_fp = fopen(from, "r");
+ nsinfo__mountns_exit(&nsc);
+ if (from_fp == NULL)
+ goto out;
+
+ to_fp = fopen(to, "w");
+ if (to_fp == NULL)
+ goto out_fclose_from;
+
+ while (getline(&line, &n, from_fp) > 0)
+ if (fputs(line, to_fp) == EOF)
+ goto out_fclose_to;
+ err = 0;
+out_fclose_to:
+ fclose(to_fp);
+ free(line);
+out_fclose_from:
+ fclose(from_fp);
+out:
+ return err;
+}
+
+int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
+{
+ void *ptr;
+ loff_t pgoff;
+
+ pgoff = off_in & ~(page_size - 1);
+ off_in -= pgoff;
+
+ ptr = mmap(NULL, off_in + size, PROT_READ, MAP_PRIVATE, ifd, pgoff);
+ if (ptr == MAP_FAILED)
+ return -1;
+
+ while (size) {
+ ssize_t ret = pwrite(ofd, ptr + off_in, size, off_out);
+ if (ret < 0 && errno == EINTR)
+ continue;
+ if (ret <= 0)
+ break;
+
+ size -= ret;
+ off_in += ret;
+ off_out += ret;
+ }
+ munmap(ptr, off_in + size);
+
+ return size ? -1 : 0;
+}
+
+static int copyfile_mode_ns(const char *from, const char *to, mode_t mode,
+ struct nsinfo *nsi)
+{
+ int fromfd, tofd;
+ struct stat st;
+ int err;
+ char *tmp = NULL, *ptr = NULL;
+ struct nscookie nsc;
+
+ nsinfo__mountns_enter(nsi, &nsc);
+ err = stat(from, &st);
+ nsinfo__mountns_exit(&nsc);
+ if (err)
+ goto out;
+ err = -1;
+
+ /* extra 'x' at the end is to reserve space for '.' */
+ if (asprintf(&tmp, "%s.XXXXXXx", to) < 0) {
+ tmp = NULL;
+ goto out;
+ }
+ ptr = strrchr(tmp, '/');
+ if (!ptr)
+ goto out;
+ ptr = memmove(ptr + 1, ptr, strlen(ptr) - 1);
+ *ptr = '.';
+
+ tofd = mkstemp(tmp);
+ if (tofd < 0)
+ goto out;
+
+ if (fchmod(tofd, mode))
+ goto out_close_to;
+
+ if (st.st_size == 0) { /* /proc? do it slowly... */
+ err = slow_copyfile(from, tmp, nsi);
+ goto out_close_to;
+ }
+
+ nsinfo__mountns_enter(nsi, &nsc);
+ fromfd = open(from, O_RDONLY);
+ nsinfo__mountns_exit(&nsc);
+ if (fromfd < 0)
+ goto out_close_to;
+
+ err = copyfile_offset(fromfd, 0, tofd, 0, st.st_size);
+
+ close(fromfd);
+out_close_to:
+ close(tofd);
+ if (!err)
+ err = link(tmp, to);
+ unlink(tmp);
+out:
+ free(tmp);
+ return err;
+}
+
+int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi)
+{
+ return copyfile_mode_ns(from, to, 0755, nsi);
+}
+
+int copyfile_mode(const char *from, const char *to, mode_t mode)
+{
+ return copyfile_mode_ns(from, to, mode, NULL);
+}
+
+int copyfile(const char *from, const char *to)
+{
+ return copyfile_mode(from, to, 0755);
+}
diff --git a/tools/perf/util/copyfile.h b/tools/perf/util/copyfile.h
new file mode 100644
index 000000000000..e85d2f22f3cc
--- /dev/null
+++ b/tools/perf/util/copyfile.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef PERF_COPYFILE_H_
+#define PERF_COPYFILE_H_
+
+#include <linux/types.h>
+#include <sys/types.h>
+#include <fcntl.h>
+
+struct nsinfo;
+
+int copyfile(const char *from, const char *to);
+int copyfile_mode(const char *from, const char *to, mode_t mode);
+int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi);
+int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size);
+
+#endif // PERF_COPYFILE_H_
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 37d7c492b155..cd92a99eb89d 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -17,7 +17,6 @@
#include "cs-etm.h"
#include "cs-etm-decoder.h"
#include "intlist.h"
-#include "util.h"
/* use raw logging */
#ifdef CS_DEBUG_RAW
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 707afdbd9529..4ba0f871f086 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -35,7 +35,7 @@
#include "thread.h"
#include "thread-stack.h"
#include <tools/libc_compat.h>
-#include "util.h"
+#include "util/synthetic-events.h"
#define MAX_TIMESTAMP (~0ULL)
@@ -1298,7 +1298,7 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
attr.read_format = evsel->core.attr.read_format;
/* create new id val to be a fixed offset from evsel id */
- id = evsel->id[0] + 1000000000;
+ id = evsel->core.id[0] + 1000000000;
if (!id)
id = 1;
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 0c268449959c..dbc772bfb04e 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -30,6 +30,7 @@
#include "machine.h"
#include "config.h"
#include <linux/ctype.h>
+#include <linux/err.h>
#define pr_N(n, fmt, ...) \
eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
@@ -1619,8 +1620,10 @@ int bt_convert__perf2ctf(const char *input, const char *path,
err = -1;
/* perf.data session */
session = perf_session__new(&data, 0, &c.tool);
- if (!session)
+ if (IS_ERR(session)) {
+ err = PTR_ERR(session);
goto free_writer;
+ }
if (c.queue_size) {
ordered_events__set_alloc_size(&session->ordered_events,
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index e75c3a279fe8..88fba2ba549f 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -13,9 +13,10 @@
#include <dirent.h>
#include "data.h"
-#include "util.h"
+#include "util.h" // rm_rf_perf_data()
#include "debug.h"
#include "header.h"
+#include <internal/lib.h>
static void close_dir(struct perf_data_file *files, int nr)
{
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index a1b59bd35519..e55114f0336f 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -17,7 +17,6 @@
#include "event.h"
#include "debug.h"
#include "print_binary.h"
-#include "util.h"
#include "target.h"
#include "ui/helpline.h"
#include "ui/ui.h"
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index b2deee987ffa..d25ae1c4cee9 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -3,9 +3,9 @@
#ifndef __PERF_DEBUG_H
#define __PERF_DEBUG_H
+#include <stdarg.h>
#include <stdbool.h>
#include <linux/compiler.h>
-#include "../ui/util.h"
extern int verbose;
extern bool quiet, dump_trace;
diff --git a/tools/perf/util/demangle-java.c b/tools/perf/util/demangle-java.c
index 763328c151e9..6fb7f34c0814 100644
--- a/tools/perf/util/demangle-java.c
+++ b/tools/perf/util/demangle-java.c
@@ -3,7 +3,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include "debug.h"
#include "symbol.h"
#include "demangle-java.h"
diff --git a/tools/perf/util/demangle-rust.c b/tools/perf/util/demangle-rust.c
index 423afbbd386b..a659fc69f73a 100644
--- a/tools/perf/util/demangle-rust.c
+++ b/tools/perf/util/demangle-rust.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
-#include "util.h"
#include "debug.h"
#include "demangle-rust.h"
diff --git a/tools/perf/util/dwarf-regs.c b/tools/perf/util/dwarf-regs.c
index db55eddce8cd..1b49ecee5aff 100644
--- a/tools/perf/util/dwarf-regs.c
+++ b/tools/perf/util/dwarf-regs.c
@@ -5,7 +5,6 @@
* Written by: Masami Hiramatsu <mhiramat@kernel.org>
*/
-#include <util.h>
#include <debug.h>
#include <dwarf-regs.h>
#include <elf.h>
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index d8e083d42610..db40906e2937 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -4,9 +4,10 @@
#include <linux/types.h>
#include <linux/rbtree.h>
-#include "cpumap.h"
#include "rwsem.h"
+struct perf_cpu_map;
+
struct cpu_topology_map {
int socket_id;
int die_id;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index f4afbb858ebb..fc1e5a991008 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1,16 +1,16 @@
-#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include <perf/cpumap.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
-#include <api/fs/fs.h>
#include <linux/perf_event.h>
#include <linux/zalloc.h>
+#include "cpumap.h"
#include "dso.h"
#include "event.h"
#include "debug.h"
@@ -24,6 +24,7 @@
#include "time-utils.h"
#include <linux/ctype.h>
#include "map.h"
+#include "util/namespaces.h"
#include "symbol.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
@@ -33,8 +34,6 @@
#include "tool.h"
#include "../perf.h"
-#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
-
static const char *perf_event__names[] = {
[0] = "TOTAL",
[PERF_RECORD_MMAP] = "MMAP",
@@ -75,18 +74,6 @@ static const char *perf_event__names[] = {
[PERF_RECORD_COMPRESSED] = "COMPRESSED",
};
-static const char *perf_ns__names[] = {
- [NET_NS_INDEX] = "net",
- [UTS_NS_INDEX] = "uts",
- [IPC_NS_INDEX] = "ipc",
- [PID_NS_INDEX] = "pid",
- [USER_NS_INDEX] = "user",
- [MNT_NS_INDEX] = "mnt",
- [CGROUP_NS_INDEX] = "cgroup",
-};
-
-unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
-
const char *perf_event__name(unsigned int id)
{
if (id >= ARRAY_SIZE(perf_event__names))
@@ -96,775 +83,6 @@ const char *perf_event__name(unsigned int id)
return perf_event__names[id];
}
-static const char *perf_ns__name(unsigned int id)
-{
- if (id >= ARRAY_SIZE(perf_ns__names))
- return "UNKNOWN";
- return perf_ns__names[id];
-}
-
-int perf_tool__process_synth_event(struct perf_tool *tool,
- union perf_event *event,
- struct machine *machine,
- perf_event__handler_t process)
-{
- struct perf_sample synth_sample = {
- .pid = -1,
- .tid = -1,
- .time = -1,
- .stream_id = -1,
- .cpu = -1,
- .period = 1,
- .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
- };
-
- return process(tool, event, &synth_sample, machine);
-};
-
-/*
- * Assumes that the first 4095 bytes of /proc/pid/stat contains
- * the comm, tgid and ppid.
- */
-static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
- pid_t *tgid, pid_t *ppid)
-{
- char filename[PATH_MAX];
- char bf[4096];
- int fd;
- size_t size = 0;
- ssize_t n;
- char *name, *tgids, *ppids;
-
- *tgid = -1;
- *ppid = -1;
-
- snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
-
- fd = open(filename, O_RDONLY);
- if (fd < 0) {
- pr_debug("couldn't open %s\n", filename);
- return -1;
- }
-
- n = read(fd, bf, sizeof(bf) - 1);
- close(fd);
- if (n <= 0) {
- pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
- pid);
- return -1;
- }
- bf[n] = '\0';
-
- name = strstr(bf, "Name:");
- tgids = strstr(bf, "Tgid:");
- ppids = strstr(bf, "PPid:");
-
- if (name) {
- char *nl;
-
- name = skip_spaces(name + 5); /* strlen("Name:") */
- nl = strchr(name, '\n');
- if (nl)
- *nl = '\0';
-
- size = strlen(name);
- if (size >= len)
- size = len - 1;
- memcpy(comm, name, size);
- comm[size] = '\0';
- } else {
- pr_debug("Name: string not found for pid %d\n", pid);
- }
-
- if (tgids) {
- tgids += 5; /* strlen("Tgid:") */
- *tgid = atoi(tgids);
- } else {
- pr_debug("Tgid: string not found for pid %d\n", pid);
- }
-
- if (ppids) {
- ppids += 5; /* strlen("PPid:") */
- *ppid = atoi(ppids);
- } else {
- pr_debug("PPid: string not found for pid %d\n", pid);
- }
-
- return 0;
-}
-
-static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
- struct machine *machine,
- pid_t *tgid, pid_t *ppid)
-{
- size_t size;
-
- *ppid = -1;
-
- memset(&event->comm, 0, sizeof(event->comm));
-
- if (machine__is_host(machine)) {
- if (perf_event__get_comm_ids(pid, event->comm.comm,
- sizeof(event->comm.comm),
- tgid, ppid) != 0) {
- return -1;
- }
- } else {
- *tgid = machine->pid;
- }
-
- if (*tgid < 0)
- return -1;
-
- event->comm.pid = *tgid;
- event->comm.header.type = PERF_RECORD_COMM;
-
- size = strlen(event->comm.comm) + 1;
- size = PERF_ALIGN(size, sizeof(u64));
- memset(event->comm.comm + size, 0, machine->id_hdr_size);
- event->comm.header.size = (sizeof(event->comm) -
- (sizeof(event->comm.comm) - size) +
- machine->id_hdr_size);
- event->comm.tid = pid;
-
- return 0;
-}
-
-pid_t perf_event__synthesize_comm(struct perf_tool *tool,
- union perf_event *event, pid_t pid,
- perf_event__handler_t process,
- struct machine *machine)
-{
- pid_t tgid, ppid;
-
- if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
- return -1;
-
- if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
- return -1;
-
- return tgid;
-}
-
-static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
- struct perf_ns_link_info *ns_link_info)
-{
- struct stat64 st;
- char proc_ns[128];
-
- sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
- if (stat64(proc_ns, &st) == 0) {
- ns_link_info->dev = st.st_dev;
- ns_link_info->ino = st.st_ino;
- }
-}
-
-int perf_event__synthesize_namespaces(struct perf_tool *tool,
- union perf_event *event,
- pid_t pid, pid_t tgid,
- perf_event__handler_t process,
- struct machine *machine)
-{
- u32 idx;
- struct perf_ns_link_info *ns_link_info;
-
- if (!tool || !tool->namespace_events)
- return 0;
-
- memset(&event->namespaces, 0, (sizeof(event->namespaces) +
- (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
- machine->id_hdr_size));
-
- event->namespaces.pid = tgid;
- event->namespaces.tid = pid;
-
- event->namespaces.nr_namespaces = NR_NAMESPACES;
-
- ns_link_info = event->namespaces.link_info;
-
- for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
- perf_event__get_ns_link_info(pid, perf_ns__name(idx),
- &ns_link_info[idx]);
-
- event->namespaces.header.type = PERF_RECORD_NAMESPACES;
-
- event->namespaces.header.size = (sizeof(event->namespaces) +
- (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
- machine->id_hdr_size);
-
- if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
- return -1;
-
- return 0;
-}
-
-static int perf_event__synthesize_fork(struct perf_tool *tool,
- union perf_event *event,
- pid_t pid, pid_t tgid, pid_t ppid,
- perf_event__handler_t process,
- struct machine *machine)
-{
- memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
-
- /*
- * for main thread set parent to ppid from status file. For other
- * threads set parent pid to main thread. ie., assume main thread
- * spawns all threads in a process
- */
- if (tgid == pid) {
- event->fork.ppid = ppid;
- event->fork.ptid = ppid;
- } else {
- event->fork.ppid = tgid;
- event->fork.ptid = tgid;
- }
- event->fork.pid = tgid;
- event->fork.tid = pid;
- event->fork.header.type = PERF_RECORD_FORK;
- event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
-
- event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
-
- if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
- return -1;
-
- return 0;
-}
-
-int perf_event__synthesize_mmap_events(struct perf_tool *tool,
- union perf_event *event,
- pid_t pid, pid_t tgid,
- perf_event__handler_t process,
- struct machine *machine,
- bool mmap_data)
-{
- char filename[PATH_MAX];
- FILE *fp;
- unsigned long long t;
- bool truncation = false;
- unsigned long long timeout = proc_map_timeout * 1000000ULL;
- int rc = 0;
- const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
- int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
-
- if (machine__is_default_guest(machine))
- return 0;
-
- snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
- machine->root_dir, pid, pid);
-
- fp = fopen(filename, "r");
- if (fp == NULL) {
- /*
- * We raced with a task exiting - just return:
- */
- pr_debug("couldn't open %s\n", filename);
- return -1;
- }
-
- event->header.type = PERF_RECORD_MMAP2;
- t = rdclock();
-
- while (1) {
- char bf[BUFSIZ];
- char prot[5];
- char execname[PATH_MAX];
- char anonstr[] = "//anon";
- unsigned int ino;
- size_t size;
- ssize_t n;
-
- if (fgets(bf, sizeof(bf), fp) == NULL)
- break;
-
- if ((rdclock() - t) > timeout) {
- pr_warning("Reading %s time out. "
- "You may want to increase "
- "the time limit by --proc-map-timeout\n",
- filename);
- truncation = true;
- goto out;
- }
-
- /* ensure null termination since stack will be reused. */
- strcpy(execname, "");
-
- /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
- n = sscanf(bf, "%"PRI_lx64"-%"PRI_lx64" %s %"PRI_lx64" %x:%x %u %[^\n]\n",
- &event->mmap2.start, &event->mmap2.len, prot,
- &event->mmap2.pgoff, &event->mmap2.maj,
- &event->mmap2.min,
- &ino, execname);
-
- /*
- * Anon maps don't have the execname.
- */
- if (n < 7)
- continue;
-
- event->mmap2.ino = (u64)ino;
-
- /*
- * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
- */
- if (machine__is_host(machine))
- event->header.misc = PERF_RECORD_MISC_USER;
- else
- event->header.misc = PERF_RECORD_MISC_GUEST_USER;
-
- /* map protection and flags bits */
- event->mmap2.prot = 0;
- event->mmap2.flags = 0;
- if (prot[0] == 'r')
- event->mmap2.prot |= PROT_READ;
- if (prot[1] == 'w')
- event->mmap2.prot |= PROT_WRITE;
- if (prot[2] == 'x')
- event->mmap2.prot |= PROT_EXEC;
-
- if (prot[3] == 's')
- event->mmap2.flags |= MAP_SHARED;
- else
- event->mmap2.flags |= MAP_PRIVATE;
-
- if (prot[2] != 'x') {
- if (!mmap_data || prot[0] != 'r')
- continue;
-
- event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
- }
-
-out:
- if (truncation)
- event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
-
- if (!strcmp(execname, ""))
- strcpy(execname, anonstr);
-
- if (hugetlbfs_mnt_len &&
- !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
- strcpy(execname, anonstr);
- event->mmap2.flags |= MAP_HUGETLB;
- }
-
- size = strlen(execname) + 1;
- memcpy(event->mmap2.filename, execname, size);
- size = PERF_ALIGN(size, sizeof(u64));
- event->mmap2.len -= event->mmap.start;
- event->mmap2.header.size = (sizeof(event->mmap2) -
- (sizeof(event->mmap2.filename) - size));
- memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
- event->mmap2.header.size += machine->id_hdr_size;
- event->mmap2.pid = tgid;
- event->mmap2.tid = pid;
-
- if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
- rc = -1;
- break;
- }
-
- if (truncation)
- break;
- }
-
- fclose(fp);
- return rc;
-}
-
-int perf_event__synthesize_modules(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine)
-{
- int rc = 0;
- struct map *pos;
- struct maps *maps = machine__kernel_maps(machine);
- union perf_event *event = zalloc((sizeof(event->mmap) +
- machine->id_hdr_size));
- if (event == NULL) {
- pr_debug("Not enough memory synthesizing mmap event "
- "for kernel modules\n");
- return -1;
- }
-
- event->header.type = PERF_RECORD_MMAP;
-
- /*
- * kernel uses 0 for user space maps, see kernel/perf_event.c
- * __perf_event_mmap
- */
- if (machine__is_host(machine))
- event->header.misc = PERF_RECORD_MISC_KERNEL;
- else
- event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
-
- for (pos = maps__first(maps); pos; pos = map__next(pos)) {
- size_t size;
-
- if (!__map__is_kmodule(pos))
- continue;
-
- size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
- event->mmap.header.type = PERF_RECORD_MMAP;
- event->mmap.header.size = (sizeof(event->mmap) -
- (sizeof(event->mmap.filename) - size));
- memset(event->mmap.filename + size, 0, machine->id_hdr_size);
- event->mmap.header.size += machine->id_hdr_size;
- event->mmap.start = pos->start;
- event->mmap.len = pos->end - pos->start;
- event->mmap.pid = machine->pid;
-
- memcpy(event->mmap.filename, pos->dso->long_name,
- pos->dso->long_name_len + 1);
- if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
- rc = -1;
- break;
- }
- }
-
- free(event);
- return rc;
-}
-
-static int __event__synthesize_thread(union perf_event *comm_event,
- union perf_event *mmap_event,
- union perf_event *fork_event,
- union perf_event *namespaces_event,
- pid_t pid, int full,
- perf_event__handler_t process,
- struct perf_tool *tool,
- struct machine *machine,
- bool mmap_data)
-{
- char filename[PATH_MAX];
- DIR *tasks;
- struct dirent *dirent;
- pid_t tgid, ppid;
- int rc = 0;
-
- /* special case: only send one comm event using passed in pid */
- if (!full) {
- tgid = perf_event__synthesize_comm(tool, comm_event, pid,
- process, machine);
-
- if (tgid == -1)
- return -1;
-
- if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
- tgid, process, machine) < 0)
- return -1;
-
- /*
- * send mmap only for thread group leader
- * see thread__init_map_groups
- */
- if (pid == tgid &&
- perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
- process, machine, mmap_data))
- return -1;
-
- return 0;
- }
-
- if (machine__is_default_guest(machine))
- return 0;
-
- snprintf(filename, sizeof(filename), "%s/proc/%d/task",
- machine->root_dir, pid);
-
- tasks = opendir(filename);
- if (tasks == NULL) {
- pr_debug("couldn't open %s\n", filename);
- return 0;
- }
-
- while ((dirent = readdir(tasks)) != NULL) {
- char *end;
- pid_t _pid;
-
- _pid = strtol(dirent->d_name, &end, 10);
- if (*end)
- continue;
-
- rc = -1;
- if (perf_event__prepare_comm(comm_event, _pid, machine,
- &tgid, &ppid) != 0)
- break;
-
- if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
- ppid, process, machine) < 0)
- break;
-
- if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
- tgid, process, machine) < 0)
- break;
-
- /*
- * Send the prepared comm event
- */
- if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
- break;
-
- rc = 0;
- if (_pid == pid) {
- /* process the parent's maps too */
- rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
- process, machine, mmap_data);
- if (rc)
- break;
- }
- }
-
- closedir(tasks);
- return rc;
-}
-
-int perf_event__synthesize_thread_map(struct perf_tool *tool,
- struct perf_thread_map *threads,
- perf_event__handler_t process,
- struct machine *machine,
- bool mmap_data)
-{
- union perf_event *comm_event, *mmap_event, *fork_event;
- union perf_event *namespaces_event;
- int err = -1, thread, j;
-
- comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
- if (comm_event == NULL)
- goto out;
-
- mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
- if (mmap_event == NULL)
- goto out_free_comm;
-
- fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
- if (fork_event == NULL)
- goto out_free_mmap;
-
- namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
- (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
- machine->id_hdr_size);
- if (namespaces_event == NULL)
- goto out_free_fork;
-
- err = 0;
- for (thread = 0; thread < threads->nr; ++thread) {
- if (__event__synthesize_thread(comm_event, mmap_event,
- fork_event, namespaces_event,
- perf_thread_map__pid(threads, thread), 0,
- process, tool, machine,
- mmap_data)) {
- err = -1;
- break;
- }
-
- /*
- * comm.pid is set to thread group id by
- * perf_event__synthesize_comm
- */
- if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
- bool need_leader = true;
-
- /* is thread group leader in thread_map? */
- for (j = 0; j < threads->nr; ++j) {
- if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
- need_leader = false;
- break;
- }
- }
-
- /* if not, generate events for it */
- if (need_leader &&
- __event__synthesize_thread(comm_event, mmap_event,
- fork_event, namespaces_event,
- comm_event->comm.pid, 0,
- process, tool, machine,
- mmap_data)) {
- err = -1;
- break;
- }
- }
- }
- free(namespaces_event);
-out_free_fork:
- free(fork_event);
-out_free_mmap:
- free(mmap_event);
-out_free_comm:
- free(comm_event);
-out:
- return err;
-}
-
-static int __perf_event__synthesize_threads(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine,
- bool mmap_data,
- struct dirent **dirent,
- int start,
- int num)
-{
- union perf_event *comm_event, *mmap_event, *fork_event;
- union perf_event *namespaces_event;
- int err = -1;
- char *end;
- pid_t pid;
- int i;
-
- comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
- if (comm_event == NULL)
- goto out;
-
- mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
- if (mmap_event == NULL)
- goto out_free_comm;
-
- fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
- if (fork_event == NULL)
- goto out_free_mmap;
-
- namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
- (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
- machine->id_hdr_size);
- if (namespaces_event == NULL)
- goto out_free_fork;
-
- for (i = start; i < start + num; i++) {
- if (!isdigit(dirent[i]->d_name[0]))
- continue;
-
- pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
- /* only interested in proper numerical dirents */
- if (*end)
- continue;
- /*
- * We may race with exiting thread, so don't stop just because
- * one thread couldn't be synthesized.
- */
- __event__synthesize_thread(comm_event, mmap_event, fork_event,
- namespaces_event, pid, 1, process,
- tool, machine, mmap_data);
- }
- err = 0;
-
- free(namespaces_event);
-out_free_fork:
- free(fork_event);
-out_free_mmap:
- free(mmap_event);
-out_free_comm:
- free(comm_event);
-out:
- return err;
-}
-
-struct synthesize_threads_arg {
- struct perf_tool *tool;
- perf_event__handler_t process;
- struct machine *machine;
- bool mmap_data;
- struct dirent **dirent;
- int num;
- int start;
-};
-
-static void *synthesize_threads_worker(void *arg)
-{
- struct synthesize_threads_arg *args = arg;
-
- __perf_event__synthesize_threads(args->tool, args->process,
- args->machine, args->mmap_data,
- args->dirent,
- args->start, args->num);
- return NULL;
-}
-
-int perf_event__synthesize_threads(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine,
- bool mmap_data,
- unsigned int nr_threads_synthesize)
-{
- struct synthesize_threads_arg *args = NULL;
- pthread_t *synthesize_threads = NULL;
- char proc_path[PATH_MAX];
- struct dirent **dirent;
- int num_per_thread;
- int m, n, i, j;
- int thread_nr;
- int base = 0;
- int err = -1;
-
-
- if (machine__is_default_guest(machine))
- return 0;
-
- snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
- n = scandir(proc_path, &dirent, 0, alphasort);
- if (n < 0)
- return err;
-
- if (nr_threads_synthesize == UINT_MAX)
- thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
- else
- thread_nr = nr_threads_synthesize;
-
- if (thread_nr <= 1) {
- err = __perf_event__synthesize_threads(tool, process,
- machine, mmap_data,
- dirent, base, n);
- goto free_dirent;
- }
- if (thread_nr > n)
- thread_nr = n;
-
- synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
- if (synthesize_threads == NULL)
- goto free_dirent;
-
- args = calloc(sizeof(*args), thread_nr);
- if (args == NULL)
- goto free_threads;
-
- num_per_thread = n / thread_nr;
- m = n % thread_nr;
- for (i = 0; i < thread_nr; i++) {
- args[i].tool = tool;
- args[i].process = process;
- args[i].machine = machine;
- args[i].mmap_data = mmap_data;
- args[i].dirent = dirent;
- }
- for (i = 0; i < m; i++) {
- args[i].num = num_per_thread + 1;
- args[i].start = i * args[i].num;
- }
- if (i != 0)
- base = args[i-1].start + args[i-1].num;
- for (j = i; j < thread_nr; j++) {
- args[j].num = num_per_thread;
- args[j].start = base + (j - i) * args[i].num;
- }
-
- for (i = 0; i < thread_nr; i++) {
- if (pthread_create(&synthesize_threads[i], NULL,
- synthesize_threads_worker, &args[i]))
- goto out_join;
- }
- err = 0;
-out_join:
- for (i = 0; i < thread_nr; i++)
- pthread_join(synthesize_threads[i], NULL);
- free(args);
-free_threads:
- free(synthesize_threads);
-free_dirent:
- for (i = 0; i < n; i++)
- zfree(&dirent[i]);
- free(dirent);
-
- return err;
-}
-
struct process_symbol_args {
const char *name;
u64 start;
@@ -899,327 +117,6 @@ int kallsyms__get_function_start(const char *kallsyms_filename,
return 0;
}
-int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
- perf_event__handler_t process __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- return 0;
-}
-
-static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine)
-{
- size_t size;
- struct map *map = machine__kernel_map(machine);
- struct kmap *kmap;
- int err;
- union perf_event *event;
-
- if (map == NULL)
- return -1;
-
- kmap = map__kmap(map);
- if (!kmap->ref_reloc_sym)
- return -1;
-
- /*
- * We should get this from /sys/kernel/sections/.text, but till that is
- * available use this, and after it is use this as a fallback for older
- * kernels.
- */
- event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
- if (event == NULL) {
- pr_debug("Not enough memory synthesizing mmap event "
- "for kernel modules\n");
- return -1;
- }
-
- if (machine__is_host(machine)) {
- /*
- * kernel uses PERF_RECORD_MISC_USER for user space maps,
- * see kernel/perf_event.c __perf_event_mmap
- */
- event->header.misc = PERF_RECORD_MISC_KERNEL;
- } else {
- event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
- }
-
- size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
- "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
- size = PERF_ALIGN(size, sizeof(u64));
- event->mmap.header.type = PERF_RECORD_MMAP;
- event->mmap.header.size = (sizeof(event->mmap) -
- (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
- event->mmap.pgoff = kmap->ref_reloc_sym->addr;
- event->mmap.start = map->start;
- event->mmap.len = map->end - event->mmap.start;
- event->mmap.pid = machine->pid;
-
- err = perf_tool__process_synth_event(tool, event, machine, process);
- free(event);
-
- return err;
-}
-
-int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine)
-{
- int err;
-
- err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
- if (err < 0)
- return err;
-
- return perf_event__synthesize_extra_kmaps(tool, process, machine);
-}
-
-int perf_event__synthesize_thread_map2(struct perf_tool *tool,
- struct perf_thread_map *threads,
- perf_event__handler_t process,
- struct machine *machine)
-{
- union perf_event *event;
- int i, err, size;
-
- size = sizeof(event->thread_map);
- size += threads->nr * sizeof(event->thread_map.entries[0]);
-
- event = zalloc(size);
- if (!event)
- return -ENOMEM;
-
- event->header.type = PERF_RECORD_THREAD_MAP;
- event->header.size = size;
- event->thread_map.nr = threads->nr;
-
- for (i = 0; i < threads->nr; i++) {
- struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
- char *comm = perf_thread_map__comm(threads, i);
-
- if (!comm)
- comm = (char *) "";
-
- entry->pid = perf_thread_map__pid(threads, i);
- strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
- }
-
- err = process(tool, event, NULL, machine);
-
- free(event);
- return err;
-}
-
-static void synthesize_cpus(struct cpu_map_entries *cpus,
- struct perf_cpu_map *map)
-{
- int i;
-
- cpus->nr = map->nr;
-
- for (i = 0; i < map->nr; i++)
- cpus->cpu[i] = map->map[i];
-}
-
-static void synthesize_mask(struct perf_record_record_cpu_map *mask,
- struct perf_cpu_map *map, int max)
-{
- int i;
-
- mask->nr = BITS_TO_LONGS(max);
- mask->long_size = sizeof(long);
-
- for (i = 0; i < map->nr; i++)
- set_bit(map->map[i], mask->mask);
-}
-
-static size_t cpus_size(struct perf_cpu_map *map)
-{
- return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
-}
-
-static size_t mask_size(struct perf_cpu_map *map, int *max)
-{
- int i;
-
- *max = 0;
-
- for (i = 0; i < map->nr; i++) {
- /* bit possition of the cpu is + 1 */
- int bit = map->map[i] + 1;
-
- if (bit > *max)
- *max = bit;
- }
-
- return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
-}
-
-void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
-{
- size_t size_cpus, size_mask;
- bool is_dummy = perf_cpu_map__empty(map);
-
- /*
- * Both array and mask data have variable size based
- * on the number of cpus and their actual values.
- * The size of the 'struct perf_record_cpu_map_data' is:
- *
- * array = size of 'struct cpu_map_entries' +
- * number of cpus * sizeof(u64)
- *
- * mask = size of 'struct perf_record_record_cpu_map' +
- * maximum cpu bit converted to size of longs
- *
- * and finaly + the size of 'struct perf_record_cpu_map_data'.
- */
- size_cpus = cpus_size(map);
- size_mask = mask_size(map, max);
-
- if (is_dummy || (size_cpus < size_mask)) {
- *size += size_cpus;
- *type = PERF_CPU_MAP__CPUS;
- } else {
- *size += size_mask;
- *type = PERF_CPU_MAP__MASK;
- }
-
- *size += sizeof(struct perf_record_cpu_map_data);
- *size = PERF_ALIGN(*size, sizeof(u64));
- return zalloc(*size);
-}
-
-void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
- u16 type, int max)
-{
- data->type = type;
-
- switch (type) {
- case PERF_CPU_MAP__CPUS:
- synthesize_cpus((struct cpu_map_entries *) data->data, map);
- break;
- case PERF_CPU_MAP__MASK:
- synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
- default:
- break;
- };
-}
-
-static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
-{
- size_t size = sizeof(struct perf_record_cpu_map);
- struct perf_record_cpu_map *event;
- int max;
- u16 type;
-
- event = cpu_map_data__alloc(map, &size, &type, &max);
- if (!event)
- return NULL;
-
- event->header.type = PERF_RECORD_CPU_MAP;
- event->header.size = size;
- event->data.type = type;
-
- cpu_map_data__synthesize(&event->data, map, type, max);
- return event;
-}
-
-int perf_event__synthesize_cpu_map(struct perf_tool *tool,
- struct perf_cpu_map *map,
- perf_event__handler_t process,
- struct machine *machine)
-{
- struct perf_record_cpu_map *event;
- int err;
-
- event = cpu_map_event__new(map);
- if (!event)
- return -ENOMEM;
-
- err = process(tool, (union perf_event *) event, NULL, machine);
-
- free(event);
- return err;
-}
-
-int perf_event__synthesize_stat_config(struct perf_tool *tool,
- struct perf_stat_config *config,
- perf_event__handler_t process,
- struct machine *machine)
-{
- struct perf_record_stat_config *event;
- int size, i = 0, err;
-
- size = sizeof(*event);
- size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
-
- event = zalloc(size);
- if (!event)
- return -ENOMEM;
-
- event->header.type = PERF_RECORD_STAT_CONFIG;
- event->header.size = size;
- event->nr = PERF_STAT_CONFIG_TERM__MAX;
-
-#define ADD(__term, __val) \
- event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \
- event->data[i].val = __val; \
- i++;
-
- ADD(AGGR_MODE, config->aggr_mode)
- ADD(INTERVAL, config->interval)
- ADD(SCALE, config->scale)
-
- WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
- "stat config terms unbalanced\n");
-#undef ADD
-
- err = process(tool, (union perf_event *) event, NULL, machine);
-
- free(event);
- return err;
-}
-
-int perf_event__synthesize_stat(struct perf_tool *tool,
- u32 cpu, u32 thread, u64 id,
- struct perf_counts_values *count,
- perf_event__handler_t process,
- struct machine *machine)
-{
- struct perf_record_stat event;
-
- event.header.type = PERF_RECORD_STAT;
- event.header.size = sizeof(event);
- event.header.misc = 0;
-
- event.id = id;
- event.cpu = cpu;
- event.thread = thread;
- event.val = count->val;
- event.ena = count->ena;
- event.run = count->run;
-
- return process(tool, (union perf_event *) &event, NULL, machine);
-}
-
-int perf_event__synthesize_stat_round(struct perf_tool *tool,
- u64 evtime, u64 type,
- perf_event__handler_t process,
- struct machine *machine)
-{
- struct perf_record_stat_round event;
-
- event.header.type = PERF_RECORD_STAT_ROUND;
- event.header.size = sizeof(event);
- event.header.misc = 0;
-
- event.time = evtime;
- event.type = type;
-
- return process(tool, (union perf_event *) &event, NULL, machine);
-}
-
void perf_event__read_stat_config(struct perf_stat_config *config,
struct perf_record_stat_config *event)
{
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 47ad81d47b1a..a0a0c91cde4a 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -279,54 +279,13 @@ enum {
void perf_event__print_totals(void);
-struct perf_tool;
-struct perf_thread_map;
struct perf_cpu_map;
+struct perf_record_stat_config;
struct perf_stat_config;
-struct perf_counts_values;
-
-typedef int (*perf_event__handler_t)(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample,
- struct machine *machine);
+struct perf_tool;
-int perf_event__synthesize_thread_map(struct perf_tool *tool,
- struct perf_thread_map *threads,
- perf_event__handler_t process,
- struct machine *machine, bool mmap_data);
-int perf_event__synthesize_thread_map2(struct perf_tool *tool,
- struct perf_thread_map *threads,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__synthesize_cpu_map(struct perf_tool *tool,
- struct perf_cpu_map *cpus,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__synthesize_threads(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine, bool mmap_data,
- unsigned int nr_threads_synthesize);
-int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__synthesize_stat_config(struct perf_tool *tool,
- struct perf_stat_config *config,
- perf_event__handler_t process,
- struct machine *machine);
void perf_event__read_stat_config(struct perf_stat_config *config,
struct perf_record_stat_config *event);
-int perf_event__synthesize_stat(struct perf_tool *tool,
- u32 cpu, u32 thread, u64 id,
- struct perf_counts_values *count,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__synthesize_stat_round(struct perf_tool *tool,
- u64 time, u64 type,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__synthesize_modules(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine);
int perf_event__process_comm(struct perf_tool *tool,
union perf_event *event,
@@ -380,10 +339,6 @@ int perf_event__process_bpf(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_tool__process_synth_event(struct perf_tool *tool,
- union perf_event *event,
- struct machine *machine,
- perf_event__handler_t process);
int perf_event__process(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -405,34 +360,6 @@ void thread__resolve(struct thread *thread, struct addr_location *al,
const char *perf_event__name(unsigned int id);
-size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
- u64 read_format);
-int perf_event__synthesize_sample(union perf_event *event, u64 type,
- u64 read_format,
- const struct perf_sample *sample);
-
-pid_t perf_event__synthesize_comm(struct perf_tool *tool,
- union perf_event *event, pid_t pid,
- perf_event__handler_t process,
- struct machine *machine);
-
-int perf_event__synthesize_namespaces(struct perf_tool *tool,
- union perf_event *event,
- pid_t pid, pid_t tgid,
- perf_event__handler_t process,
- struct machine *machine);
-
-int perf_event__synthesize_mmap_events(struct perf_tool *tool,
- union perf_event *event,
- pid_t pid, pid_t tgid,
- perf_event__handler_t process,
- struct machine *machine,
- bool mmap_data);
-
-int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine);
-
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 095924aa186b..d277a98e62df 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -10,13 +10,14 @@
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
+#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
-#include "util.h"
+#include <internal/lib.h> // page_size
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
@@ -49,18 +50,14 @@ int sigqueue(pid_t pid, int sig, const union sigval value);
#endif
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
-#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
- int i;
-
- for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
- INIT_HLIST_HEAD(&evlist->heads[i]);
perf_evlist__init(&evlist->core);
perf_evlist__set_maps(&evlist->core, cpus, threads);
- fdarray__init(&evlist->pollfd, 64);
+ fdarray__init(&evlist->core.pollfd, 64);
evlist->workload.pid = -1;
evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}
@@ -108,7 +105,7 @@ struct evlist *perf_evlist__new_dummy(void)
*/
void perf_evlist__set_id_pos(struct evlist *evlist)
{
- struct evsel *first = perf_evlist__first(evlist);
+ struct evsel *first = evlist__first(evlist);
evlist->id_pos = first->id_pos;
evlist->is_pos = first->is_pos;
@@ -124,7 +121,7 @@ static void perf_evlist__update_id_pos(struct evlist *evlist)
perf_evlist__set_id_pos(evlist);
}
-static void perf_evlist__purge(struct evlist *evlist)
+static void evlist__purge(struct evlist *evlist)
{
struct evsel *pos, *n;
@@ -137,11 +134,11 @@ static void perf_evlist__purge(struct evlist *evlist)
evlist->core.nr_entries = 0;
}
-void perf_evlist__exit(struct evlist *evlist)
+void evlist__exit(struct evlist *evlist)
{
zfree(&evlist->mmap);
zfree(&evlist->overwrite_mmap);
- fdarray__exit(&evlist->pollfd);
+ fdarray__exit(&evlist->core.pollfd);
}
void evlist__delete(struct evlist *evlist)
@@ -149,14 +146,14 @@ void evlist__delete(struct evlist *evlist)
if (evlist == NULL)
return;
- perf_evlist__munmap(evlist);
+ evlist__munmap(evlist);
evlist__close(evlist);
perf_cpu_map__put(evlist->core.cpus);
perf_thread_map__put(evlist->core.threads);
evlist->core.cpus = NULL;
evlist->core.threads = NULL;
- perf_evlist__purge(evlist);
- perf_evlist__exit(evlist);
+ evlist__purge(evlist);
+ evlist__exit(evlist);
free(evlist);
}
@@ -318,7 +315,7 @@ int perf_evlist__add_newtp(struct evlist *evlist,
static int perf_evlist__nr_threads(struct evlist *evlist,
struct evsel *evsel)
{
- if (evsel->system_wide)
+ if (evsel->core.system_wide)
return 1;
else
return perf_thread_map__nr(evlist->core.threads);
@@ -401,128 +398,29 @@ int perf_evlist__enable_event_idx(struct evlist *evlist,
return perf_evlist__enable_event_thread(evlist, evsel, idx);
}
-int perf_evlist__alloc_pollfd(struct evlist *evlist)
+int evlist__add_pollfd(struct evlist *evlist, int fd)
{
- int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
- int nr_threads = perf_thread_map__nr(evlist->core.threads);
- int nfds = 0;
- struct evsel *evsel;
-
- evlist__for_each_entry(evlist, evsel) {
- if (evsel->system_wide)
- nfds += nr_cpus;
- else
- nfds += nr_cpus * nr_threads;
- }
-
- if (fdarray__available_entries(&evlist->pollfd) < nfds &&
- fdarray__grow(&evlist->pollfd, nfds) < 0)
- return -ENOMEM;
-
- return 0;
-}
-
-static int __perf_evlist__add_pollfd(struct evlist *evlist, int fd,
- struct perf_mmap *map, short revent)
-{
- int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
- /*
- * Save the idx so that when we filter out fds POLLHUP'ed we can
- * close the associated evlist->mmap[] entry.
- */
- if (pos >= 0) {
- evlist->pollfd.priv[pos].ptr = map;
-
- fcntl(fd, F_SETFL, O_NONBLOCK);
- }
-
- return pos;
-}
-
-int perf_evlist__add_pollfd(struct evlist *evlist, int fd)
-{
- return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
+ return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN);
}
static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
void *arg __maybe_unused)
{
- struct perf_mmap *map = fda->priv[fd].ptr;
+ struct mmap *map = fda->priv[fd].ptr;
if (map)
perf_mmap__put(map);
}
-int perf_evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
+int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
- return fdarray__filter(&evlist->pollfd, revents_and_mask,
+ return fdarray__filter(&evlist->core.pollfd, revents_and_mask,
perf_evlist__munmap_filtered, NULL);
}
-int perf_evlist__poll(struct evlist *evlist, int timeout)
+int evlist__poll(struct evlist *evlist, int timeout)
{
- return fdarray__poll(&evlist->pollfd, timeout);
-}
-
-static void perf_evlist__id_hash(struct evlist *evlist,
- struct evsel *evsel,
- int cpu, int thread, u64 id)
-{
- int hash;
- struct perf_sample_id *sid = SID(evsel, cpu, thread);
-
- sid->id = id;
- sid->evsel = evsel;
- hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
- hlist_add_head(&sid->node, &evlist->heads[hash]);
-}
-
-void perf_evlist__id_add(struct evlist *evlist, struct evsel *evsel,
- int cpu, int thread, u64 id)
-{
- perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
- evsel->id[evsel->ids++] = id;
-}
-
-int perf_evlist__id_add_fd(struct evlist *evlist,
- struct evsel *evsel,
- int cpu, int thread, int fd)
-{
- u64 read_data[4] = { 0, };
- int id_idx = 1; /* The first entry is the counter value */
- u64 id;
- int ret;
-
- ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
- if (!ret)
- goto add;
-
- if (errno != ENOTTY)
- return -1;
-
- /* Legacy way to get event id.. All hail to old kernels! */
-
- /*
- * This way does not work with group format read, so bail
- * out in that case.
- */
- if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
- return -1;
-
- if (!(evsel->core.attr.read_format & PERF_FORMAT_ID) ||
- read(fd, &read_data, sizeof(read_data)) == -1)
- return -1;
-
- if (evsel->core.attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
- ++id_idx;
- if (evsel->core.attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
- ++id_idx;
-
- id = read_data[id_idx];
-
- add:
- perf_evlist__id_add(evlist, evsel, cpu, thread, id);
- return 0;
+ return perf_evlist__poll(&evlist->core, timeout);
}
static void perf_evlist__set_sid_idx(struct evlist *evlist,
@@ -535,7 +433,7 @@ static void perf_evlist__set_sid_idx(struct evlist *evlist,
sid->cpu = evlist->core.cpus->map[cpu];
else
sid->cpu = -1;
- if (!evsel->system_wide && evlist->core.threads && thread >= 0)
+ if (!evsel->core.system_wide && evlist->core.threads && thread >= 0)
sid->tid = perf_thread_map__pid(evlist->core.threads, thread);
else
sid->tid = -1;
@@ -548,7 +446,7 @@ struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
int hash;
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
- head = &evlist->heads[hash];
+ head = &evlist->core.heads[hash];
hlist_for_each_entry(sid, head, node)
if (sid->id == id)
@@ -562,14 +460,14 @@ struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
struct perf_sample_id *sid;
if (evlist->core.nr_entries == 1 || !id)
- return perf_evlist__first(evlist);
+ return evlist__first(evlist);
sid = perf_evlist__id2sid(evlist, id);
if (sid)
- return sid->evsel;
+ return container_of(sid->evsel, struct evsel, core);
if (!perf_evlist__sample_id_all(evlist))
- return perf_evlist__first(evlist);
+ return evlist__first(evlist);
return NULL;
}
@@ -584,7 +482,7 @@ struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
sid = perf_evlist__id2sid(evlist, id);
if (sid)
- return sid->evsel;
+ return container_of(sid->evsel, struct evsel, core);
return NULL;
}
@@ -613,7 +511,7 @@ static int perf_evlist__event2id(struct evlist *evlist,
struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
union perf_event *event)
{
- struct evsel *first = perf_evlist__first(evlist);
+ struct evsel *first = evlist__first(evlist);
struct hlist_head *head;
struct perf_sample_id *sid;
int hash;
@@ -634,11 +532,11 @@ struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
return first;
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
- head = &evlist->heads[hash];
+ head = &evlist->core.heads[hash];
hlist_for_each_entry(sid, head, node) {
if (sid->id == id)
- return sid->evsel;
+ return container_of(sid->evsel, struct evsel, core);
}
return NULL;
}
@@ -650,8 +548,8 @@ static int perf_evlist__set_paused(struct evlist *evlist, bool value)
if (!evlist->overwrite_mmap)
return 0;
- for (i = 0; i < evlist->nr_mmaps; i++) {
- int fd = evlist->overwrite_mmap[i].fd;
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ int fd = evlist->overwrite_mmap[i].core.fd;
int err;
if (fd < 0)
@@ -673,42 +571,42 @@ static int perf_evlist__resume(struct evlist *evlist)
return perf_evlist__set_paused(evlist, false);
}
-static void perf_evlist__munmap_nofree(struct evlist *evlist)
+static void evlist__munmap_nofree(struct evlist *evlist)
{
int i;
if (evlist->mmap)
- for (i = 0; i < evlist->nr_mmaps; i++)
+ for (i = 0; i < evlist->core.nr_mmaps; i++)
perf_mmap__munmap(&evlist->mmap[i]);
if (evlist->overwrite_mmap)
- for (i = 0; i < evlist->nr_mmaps; i++)
+ for (i = 0; i < evlist->core.nr_mmaps; i++)
perf_mmap__munmap(&evlist->overwrite_mmap[i]);
}
-void perf_evlist__munmap(struct evlist *evlist)
+void evlist__munmap(struct evlist *evlist)
{
- perf_evlist__munmap_nofree(evlist);
+ evlist__munmap_nofree(evlist);
zfree(&evlist->mmap);
zfree(&evlist->overwrite_mmap);
}
-static struct perf_mmap *perf_evlist__alloc_mmap(struct evlist *evlist,
- bool overwrite)
+static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
+ bool overwrite)
{
int i;
- struct perf_mmap *map;
+ struct mmap *map;
- evlist->nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
+ evlist->core.nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
if (perf_cpu_map__empty(evlist->core.cpus))
- evlist->nr_mmaps = perf_thread_map__nr(evlist->core.threads);
- map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+ evlist->core.nr_mmaps = perf_thread_map__nr(evlist->core.threads);
+ map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
if (!map)
return NULL;
- for (i = 0; i < evlist->nr_mmaps; i++) {
- map[i].fd = -1;
- map[i].overwrite = overwrite;
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ map[i].core.fd = -1;
+ map[i].core.overwrite = overwrite;
/*
* When the perf_mmap() call is made we grab one refcount, plus
* one extra to let perf_mmap__consume() get the last
@@ -718,7 +616,7 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct evlist *evlist,
* Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
* thus does perf_mmap__get() on it.
*/
- refcount_set(&map[i].refcnt, 0);
+ refcount_set(&map[i].core.refcnt, 0);
}
return map;
}
@@ -732,7 +630,7 @@ perf_evlist__should_poll(struct evlist *evlist __maybe_unused,
return true;
}
-static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
+static int evlist__mmap_per_evsel(struct evlist *evlist, int idx,
struct mmap_params *mp, int cpu_idx,
int thread, int *_output, int *_output_overwrite)
{
@@ -741,7 +639,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
int evlist_cpu = cpu_map__cpu(evlist->core.cpus, cpu_idx);
evlist__for_each_entry(evlist, evsel) {
- struct perf_mmap *maps = evlist->mmap;
+ struct mmap *maps = evlist->mmap;
int *output = _output;
int fd;
int cpu;
@@ -752,7 +650,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
maps = evlist->overwrite_mmap;
if (!maps) {
- maps = perf_evlist__alloc_mmap(evlist, true);
+ maps = evlist__alloc_mmap(evlist, true);
if (!maps)
return -1;
evlist->overwrite_mmap = maps;
@@ -762,7 +660,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
mp->prot &= ~PROT_WRITE;
}
- if (evsel->system_wide && thread)
+ if (evsel->core.system_wide && thread)
continue;
cpu = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);
@@ -792,14 +690,14 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
* other events, so it should not need to be polled anyway.
* Therefore don't add it for polling.
*/
- if (!evsel->system_wide &&
- __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
+ if (!evsel->core.system_wide &&
+ perf_evlist__add_pollfd(&evlist->core, fd, &maps[idx], revent) < 0) {
perf_mmap__put(&maps[idx]);
return -1;
}
if (evsel->core.attr.read_format & PERF_FORMAT_ID) {
- if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
+ if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, cpu, thread,
fd) < 0)
return -1;
perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
@@ -810,7 +708,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
return 0;
}
-static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
+static int evlist__mmap_per_cpu(struct evlist *evlist,
struct mmap_params *mp)
{
int cpu, thread;
@@ -826,7 +724,7 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
true);
for (thread = 0; thread < nr_threads; thread++) {
- if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
+ if (evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
thread, &output, &output_overwrite))
goto out_unmap;
}
@@ -835,11 +733,11 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
return 0;
out_unmap:
- perf_evlist__munmap_nofree(evlist);
+ evlist__munmap_nofree(evlist);
return -1;
}
-static int perf_evlist__mmap_per_thread(struct evlist *evlist,
+static int evlist__mmap_per_thread(struct evlist *evlist,
struct mmap_params *mp)
{
int thread;
@@ -853,7 +751,7 @@ static int perf_evlist__mmap_per_thread(struct evlist *evlist,
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
false);
- if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
+ if (evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
&output, &output_overwrite))
goto out_unmap;
}
@@ -861,7 +759,7 @@ static int perf_evlist__mmap_per_thread(struct evlist *evlist,
return 0;
out_unmap:
- perf_evlist__munmap_nofree(evlist);
+ evlist__munmap_nofree(evlist);
return -1;
}
@@ -888,7 +786,7 @@ unsigned long perf_event_mlock_kb_in_pages(void)
return pages;
}
-size_t perf_evlist__mmap_size(unsigned long pages)
+size_t evlist__mmap_size(unsigned long pages)
{
if (pages == UINT_MAX)
pages = perf_event_mlock_kb_in_pages();
@@ -971,7 +869,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
}
/**
- * perf_evlist__mmap_ex - Create mmaps to receive events.
+ * evlist__mmap_ex - Create mmaps to receive events.
* @evlist: list of events
* @pages: map length in pages
* @overwrite: overwrite older events?
@@ -979,7 +877,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
* @auxtrace_overwrite - overwrite older auxtrace data?
*
* If @overwrite is %false the user needs to signal event consumption using
- * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
+ * perf_mmap__write_tail(). Using evlist__mmap_read() does this
* automatically.
*
* Similarly, if @auxtrace_overwrite is %false the user needs to signal data
@@ -987,7 +885,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
*
* Return: %0 on success, negative error code otherwise.
*/
-int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
+int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
int comp_level)
@@ -1004,36 +902,36 @@ int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
.comp_level = comp_level };
if (!evlist->mmap)
- evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
+ evlist->mmap = evlist__alloc_mmap(evlist, false);
if (!evlist->mmap)
return -ENOMEM;
- if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
+ if (evlist->core.pollfd.entries == NULL && perf_evlist__alloc_pollfd(&evlist->core) < 0)
return -ENOMEM;
- evlist->mmap_len = perf_evlist__mmap_size(pages);
- pr_debug("mmap size %zuB\n", evlist->mmap_len);
- mp.mask = evlist->mmap_len - page_size - 1;
+ evlist->core.mmap_len = evlist__mmap_size(pages);
+ pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
+ mp.mask = evlist->core.mmap_len - page_size - 1;
- auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
+ auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
auxtrace_pages, auxtrace_overwrite);
evlist__for_each_entry(evlist, evsel) {
if ((evsel->core.attr.read_format & PERF_FORMAT_ID) &&
- evsel->sample_id == NULL &&
- perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
+ evsel->core.sample_id == NULL &&
+ perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr) < 0)
return -ENOMEM;
}
if (perf_cpu_map__empty(cpus))
- return perf_evlist__mmap_per_thread(evlist, &mp);
+ return evlist__mmap_per_thread(evlist, &mp);
- return perf_evlist__mmap_per_cpu(evlist, &mp);
+ return evlist__mmap_per_cpu(evlist, &mp);
}
-int perf_evlist__mmap(struct evlist *evlist, unsigned int pages)
+int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
+ return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}
int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
@@ -1225,7 +1123,7 @@ u64 perf_evlist__combined_branch_type(struct evlist *evlist)
bool perf_evlist__valid_read_format(struct evlist *evlist)
{
- struct evsel *first = perf_evlist__first(evlist), *pos = first;
+ struct evsel *first = evlist__first(evlist), *pos = first;
u64 read_format = first->core.attr.read_format;
u64 sample_type = first->core.attr.sample_type;
@@ -1243,15 +1141,9 @@ bool perf_evlist__valid_read_format(struct evlist *evlist)
return true;
}
-u64 perf_evlist__read_format(struct evlist *evlist)
-{
- struct evsel *first = perf_evlist__first(evlist);
- return first->core.attr.read_format;
-}
-
u16 perf_evlist__id_hdr_size(struct evlist *evlist)
{
- struct evsel *first = perf_evlist__first(evlist);
+ struct evsel *first = evlist__first(evlist);
struct perf_sample *data;
u64 sample_type;
u16 size = 0;
@@ -1284,7 +1176,7 @@ out:
bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
{
- struct evsel *first = perf_evlist__first(evlist), *pos = first;
+ struct evsel *first = evlist__first(evlist), *pos = first;
evlist__for_each_entry_continue(evlist, pos) {
if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
@@ -1296,7 +1188,7 @@ bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
bool perf_evlist__sample_id_all(struct evlist *evlist)
{
- struct evsel *first = perf_evlist__first(evlist);
+ struct evsel *first = evlist__first(evlist);
return first->core.attr.sample_id_all;
}
@@ -1529,19 +1421,6 @@ int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
}
-size_t perf_evlist__fprintf(struct evlist *evlist, FILE *fp)
-{
- struct evsel *evsel;
- size_t printed = 0;
-
- evlist__for_each_entry(evlist, evsel) {
- printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
- perf_evsel__name(evsel));
- }
-
- return printed + fprintf(fp, "\n");
-}
-
int perf_evlist__strerror_open(struct evlist *evlist,
int err, char *buf, size_t size)
{
@@ -1571,7 +1450,7 @@ int perf_evlist__strerror_open(struct evlist *evlist,
"Hint:\tThe current value is %d.", value);
break;
case EINVAL: {
- struct evsel *first = perf_evlist__first(evlist);
+ struct evsel *first = evlist__first(evlist);
int max_freq;
if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
@@ -1599,7 +1478,7 @@ out_default:
int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
- int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
+ int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;
switch (err) {
case EPERM:
@@ -1633,7 +1512,7 @@ void perf_evlist__to_front(struct evlist *evlist,
struct evsel *evsel, *n;
LIST_HEAD(move);
- if (move_evsel == perf_evlist__first(evlist))
+ if (move_evsel == evlist__first(evlist))
return;
evlist__for_each_entry_safe(evlist, n, evsel) {
@@ -1754,7 +1633,7 @@ bool perf_evlist__exclude_kernel(struct evlist *evlist)
void perf_evlist__force_leader(struct evlist *evlist)
{
if (!evlist->nr_groups) {
- struct evsel *leader = perf_evlist__first(evlist);
+ struct evsel *leader = evlist__first(evlist);
perf_evlist__set_leader(evlist);
leader->forced_leader = true;
@@ -1780,7 +1659,7 @@ struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
is_open = false;
if (c2->leader == leader) {
if (is_open)
- evsel__close(c2);
+ perf_evsel__close(&c2->core);
c2->leader = c2;
c2->core.nr_members = 0;
}
@@ -1844,10 +1723,10 @@ static void *perf_evlist__poll_thread(void *arg)
draining = true;
if (!draining)
- perf_evlist__poll(evlist, 1000);
+ evlist__poll(evlist, 1000);
- for (i = 0; i < evlist->nr_mmaps; i++) {
- struct perf_mmap *map = &evlist->mmap[i];
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ struct mmap *map = &evlist->mmap[i];
union perf_event *event;
if (perf_mmap__read_init(map))
@@ -1889,7 +1768,7 @@ int perf_evlist__start_sb_thread(struct evlist *evlist,
goto out_delete_evlist;
}
- if (perf_evlist__mmap(evlist, UINT_MAX))
+ if (evlist__mmap(evlist, UINT_MAX))
goto out_delete_evlist;
evlist__for_each_entry(evlist, counter) {
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index a55f0f2546e5..7cfe75522ba5 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -7,11 +7,11 @@
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
-#include <stdio.h>
#include <internal/evlist.h>
+#include <internal/evsel.h>
#include "events_stats.h"
#include "evsel.h"
-#include "mmap.h"
+#include <pthread.h>
#include <signal.h>
#include <unistd.h>
@@ -20,16 +20,38 @@ struct thread_map;
struct perf_cpu_map;
struct record_opts;
-#define PERF_EVLIST__HLIST_BITS 8
-#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
+/*
+ * State machine of bkw_mmap_state:
+ *
+ *                     .________________(forbid)_____________.
+ *                     |                                      V
+ * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
+ *                     ^  ^              |   ^               |
+ *                     |  |__(forbid)____/   |___(forbid)___/|
+ *                     |                                     |
+ *                      \_________________(3)_______________/
+ *
+ * NOTREADY     : Backward ring buffers are not ready
+ * RUNNING      : Backward ring buffers are recording
+ * DATA_PENDING : We are required to collect data from backward ring buffers
+ * EMPTY        : We have collected data from backward ring buffers.
+ *
+ * (0): Setup backward ring buffer
+ * (1): Pause ring buffers for reading
+ * (2): Read from ring buffers
+ * (3): Resume ring buffers for recording
+ */
+enum bkw_mmap_state {
+ BKW_MMAP_NOTREADY,
+ BKW_MMAP_RUNNING,
+ BKW_MMAP_DATA_PENDING,
+ BKW_MMAP_EMPTY,
+};
struct evlist {
struct perf_evlist core;
- struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
int nr_groups;
- int nr_mmaps;
bool enabled;
- size_t mmap_len;
int id_pos;
int is_pos;
u64 combined_sample_type;
@@ -38,9 +60,8 @@ struct evlist {
int cork_fd;
pid_t pid;
} workload;
- struct fdarray pollfd;
- struct perf_mmap *mmap;
- struct perf_mmap *overwrite_mmap;
+ struct mmap *mmap;
+ struct mmap *overwrite_mmap;
struct evsel *selected;
struct events_stats stats;
struct perf_env *env;
@@ -65,7 +86,7 @@ struct evlist *perf_evlist__new_default(void);
struct evlist *perf_evlist__new_dummy(void);
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
-void perf_evlist__exit(struct evlist *evlist);
+void evlist__exit(struct evlist *evlist);
void evlist__delete(struct evlist *evlist);
void evlist__add(struct evlist *evlist, struct evsel *entry);
@@ -119,17 +140,10 @@ struct evsel *
perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
const char *name);
-void perf_evlist__id_add(struct evlist *evlist, struct evsel *evsel,
- int cpu, int thread, u64 id);
-int perf_evlist__id_add_fd(struct evlist *evlist,
- struct evsel *evsel,
- int cpu, int thread, int fd);
-
-int perf_evlist__add_pollfd(struct evlist *evlist, int fd);
-int perf_evlist__alloc_pollfd(struct evlist *evlist);
-int perf_evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask);
+int evlist__add_pollfd(struct evlist *evlist, int fd);
+int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask);
-int perf_evlist__poll(struct evlist *evlist, int timeout);
+int evlist__poll(struct evlist *evlist, int timeout);
struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id);
struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
@@ -139,7 +153,7 @@ struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id);
void perf_evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state);
-void perf_evlist__mmap_consume(struct evlist *evlist, int idx);
+void evlist__mmap_consume(struct evlist *evlist, int idx);
int evlist__open(struct evlist *evlist);
void evlist__close(struct evlist *evlist);
@@ -170,14 +184,14 @@ int perf_evlist__parse_mmap_pages(const struct option *opt,
unsigned long perf_event_mlock_kb_in_pages(void);
-int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
+int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
bool auxtrace_overwrite, int nr_cblocks,
int affinity, int flush, int comp_level);
-int perf_evlist__mmap(struct evlist *evlist, unsigned int pages);
-void perf_evlist__munmap(struct evlist *evlist);
+int evlist__mmap(struct evlist *evlist, unsigned int pages);
+void evlist__munmap(struct evlist *evlist);
-size_t perf_evlist__mmap_size(unsigned long pages);
+size_t evlist__mmap_size(unsigned long pages);
void evlist__disable(struct evlist *evlist);
void evlist__enable(struct evlist *evlist);
@@ -195,7 +209,6 @@ int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel);
void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct evlist *evlist);
-u64 perf_evlist__read_format(struct evlist *evlist);
u64 __perf_evlist__combined_sample_type(struct evlist *evlist);
u64 perf_evlist__combined_sample_type(struct evlist *evlist);
u64 perf_evlist__combined_branch_type(struct evlist *evlist);
@@ -221,17 +234,19 @@ static inline bool perf_evlist__empty(struct evlist *evlist)
return list_empty(&evlist->core.entries);
}
-static inline struct evsel *perf_evlist__first(struct evlist *evlist)
+static inline struct evsel *evlist__first(struct evlist *evlist)
{
- return list_entry(evlist->core.entries.next, struct evsel, core.node);
+ struct perf_evsel *evsel = perf_evlist__first(&evlist->core);
+
+ return container_of(evsel, struct evsel, core);
}
-static inline struct evsel *perf_evlist__last(struct evlist *evlist)
+static inline struct evsel *evlist__last(struct evlist *evlist)
{
- return list_entry(evlist->core.entries.prev, struct evsel, core.node);
-}
+ struct perf_evsel *evsel = perf_evlist__last(&evlist->core);
-size_t perf_evlist__fprintf(struct evlist *evlist, FILE *fp);
+ return container_of(evsel, struct evsel, core);
+}
int perf_evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 85825384f9e8..abc7fda4a0fe 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -30,8 +30,11 @@
#include "counts.h"
#include "event.h"
#include "evsel.h"
+#include "util/env.h"
+#include "util/evsel_config.h"
+#include "util/evsel_fprintf.h"
#include "evlist.h"
-#include "cpumap.h"
+#include <perf/cpumap.h>
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
@@ -45,6 +48,7 @@
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include <internal/xyarray.h>
+#include <internal/lib.h>
#include <linux/ctype.h>
@@ -1226,36 +1230,6 @@ int evsel__disable(struct evsel *evsel)
return err;
}
-int perf_evsel__alloc_id(struct evsel *evsel, int ncpus, int nthreads)
-{
- if (ncpus == 0 || nthreads == 0)
- return 0;
-
- if (evsel->system_wide)
- nthreads = 1;
-
- evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
- if (evsel->sample_id == NULL)
- return -ENOMEM;
-
- evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
- if (evsel->id == NULL) {
- xyarray__delete(evsel->sample_id);
- evsel->sample_id = NULL;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void perf_evsel__free_id(struct evsel *evsel)
-{
- xyarray__delete(evsel->sample_id);
- evsel->sample_id = NULL;
- zfree(&evsel->id);
- evsel->ids = 0;
-}
-
static void perf_evsel__free_config_terms(struct evsel *evsel)
{
struct perf_evsel_config_term *term, *h;
@@ -1272,7 +1246,7 @@ void perf_evsel__exit(struct evsel *evsel)
assert(evsel->evlist == NULL);
perf_evsel__free_counts(evsel);
perf_evsel__free_fd(&evsel->core);
- perf_evsel__free_id(evsel);
+ perf_evsel__free_id(&evsel->core);
perf_evsel__free_config_terms(evsel);
cgroup__put(evsel->cgrp);
perf_cpu_map__put(evsel->core.cpus);
@@ -1472,152 +1446,6 @@ static int get_group_fd(struct evsel *evsel, int cpu, int thread)
return fd;
}
-struct bit_names {
- int bit;
- const char *name;
-};
-
-static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
-{
- bool first_bit = true;
- int i = 0;
-
- do {
- if (value & bits[i].bit) {
- buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
- first_bit = false;
- }
- } while (bits[++i].name != NULL);
-}
-
-static void __p_sample_type(char *buf, size_t size, u64 value)
-{
-#define bit_name(n) { PERF_SAMPLE_##n, #n }
- struct bit_names bits[] = {
- bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
- bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
- bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
- bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
- bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
- bit_name(WEIGHT), bit_name(PHYS_ADDR),
- { .name = NULL, }
- };
-#undef bit_name
- __p_bits(buf, size, value, bits);
-}
-
-static void __p_branch_sample_type(char *buf, size_t size, u64 value)
-{
-#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
- struct bit_names bits[] = {
- bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
- bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
- bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
- bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
- bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
- { .name = NULL, }
- };
-#undef bit_name
- __p_bits(buf, size, value, bits);
-}
-
-static void __p_read_format(char *buf, size_t size, u64 value)
-{
-#define bit_name(n) { PERF_FORMAT_##n, #n }
- struct bit_names bits[] = {
- bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
- bit_name(ID), bit_name(GROUP),
- { .name = NULL, }
- };
-#undef bit_name
- __p_bits(buf, size, value, bits);
-}
-
-#define BUF_SIZE 1024
-
-#define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
-#define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
-#define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
-#define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
-#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
-#define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
-
-#define PRINT_ATTRn(_n, _f, _p) \
-do { \
- if (attr->_f) { \
- _p(attr->_f); \
- ret += attr__fprintf(fp, _n, buf, priv);\
- } \
-} while (0)
-
-#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p)
-
-int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
- attr__fprintf_f attr__fprintf, void *priv)
-{
- char buf[BUF_SIZE];
- int ret = 0;
-
- PRINT_ATTRf(type, p_unsigned);
- PRINT_ATTRf(size, p_unsigned);
- PRINT_ATTRf(config, p_hex);
- PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
- PRINT_ATTRf(sample_type, p_sample_type);
- PRINT_ATTRf(read_format, p_read_format);
-
- PRINT_ATTRf(disabled, p_unsigned);
- PRINT_ATTRf(inherit, p_unsigned);
- PRINT_ATTRf(pinned, p_unsigned);
- PRINT_ATTRf(exclusive, p_unsigned);
- PRINT_ATTRf(exclude_user, p_unsigned);
- PRINT_ATTRf(exclude_kernel, p_unsigned);
- PRINT_ATTRf(exclude_hv, p_unsigned);
- PRINT_ATTRf(exclude_idle, p_unsigned);
- PRINT_ATTRf(mmap, p_unsigned);
- PRINT_ATTRf(comm, p_unsigned);
- PRINT_ATTRf(freq, p_unsigned);
- PRINT_ATTRf(inherit_stat, p_unsigned);
- PRINT_ATTRf(enable_on_exec, p_unsigned);
- PRINT_ATTRf(task, p_unsigned);
- PRINT_ATTRf(watermark, p_unsigned);
- PRINT_ATTRf(precise_ip, p_unsigned);
- PRINT_ATTRf(mmap_data, p_unsigned);
- PRINT_ATTRf(sample_id_all, p_unsigned);
- PRINT_ATTRf(exclude_host, p_unsigned);
- PRINT_ATTRf(exclude_guest, p_unsigned);
- PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
- PRINT_ATTRf(exclude_callchain_user, p_unsigned);
- PRINT_ATTRf(mmap2, p_unsigned);
- PRINT_ATTRf(comm_exec, p_unsigned);
- PRINT_ATTRf(use_clockid, p_unsigned);
- PRINT_ATTRf(context_switch, p_unsigned);
- PRINT_ATTRf(write_backward, p_unsigned);
- PRINT_ATTRf(namespaces, p_unsigned);
- PRINT_ATTRf(ksymbol, p_unsigned);
- PRINT_ATTRf(bpf_event, p_unsigned);
- PRINT_ATTRf(aux_output, p_unsigned);
-
- PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
- PRINT_ATTRf(bp_type, p_unsigned);
- PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
- PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
- PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
- PRINT_ATTRf(sample_regs_user, p_hex);
- PRINT_ATTRf(sample_stack_user, p_unsigned);
- PRINT_ATTRf(clockid, p_signed);
- PRINT_ATTRf(sample_regs_intr, p_hex);
- PRINT_ATTRf(aux_watermark, p_unsigned);
- PRINT_ATTRf(sample_max_stack, p_unsigned);
-
- return ret;
-}
-
-static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
- void *priv __maybe_unused)
-{
- return fprintf(fp, " %-32s %s\n", name, val);
-}
-
static void perf_evsel__remove_fd(struct evsel *pos,
int nr_cpus, int nr_threads,
int thread_idx)
@@ -1662,7 +1490,7 @@ static bool ignore_missing_thread(struct evsel *evsel,
return false;
/* The system wide setup does not work with threads. */
- if (evsel->system_wide)
+ if (evsel->core.system_wide)
return false;
/* The -ESRCH is perf event syscall errno for pid's not found. */
@@ -1688,6 +1516,12 @@ static bool ignore_missing_thread(struct evsel *evsel,
return true;
}
+static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
+ void *priv __maybe_unused)
+{
+ return fprintf(fp, " %-32s %s\n", name, val);
+}
+
static void display_attr(struct perf_event_attr *attr)
{
if (verbose >= 2) {
@@ -1771,7 +1605,7 @@ int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
threads = empty_thread_map;
}
- if (evsel->system_wide)
+ if (evsel->core.system_wide)
nthreads = 1;
else
nthreads = threads->nr;
@@ -1818,7 +1652,7 @@ retry_sample_id:
for (thread = 0; thread < nthreads; thread++) {
int fd, group_fd;
- if (!evsel->cgrp && !evsel->system_wide)
+ if (!evsel->cgrp && !evsel->core.system_wide)
pid = perf_thread_map__pid(threads, thread);
group_fd = get_group_fd(evsel, cpu, thread);
@@ -1991,7 +1825,7 @@ out_close:
void evsel__close(struct evsel *evsel)
{
perf_evsel__close(&evsel->core);
- perf_evsel__free_id(evsel);
+ perf_evsel__free_id(&evsel->core);
}
int perf_evsel__open_per_cpu(struct evsel *evsel,
@@ -2419,283 +2253,6 @@ int perf_evsel__parse_sample_timestamp(struct evsel *evsel,
return 0;
}
-size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
- u64 read_format)
-{
- size_t sz, result = sizeof(struct perf_record_sample);
-
- if (type & PERF_SAMPLE_IDENTIFIER)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_IP)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_TID)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_TIME)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_ADDR)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_ID)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_STREAM_ID)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_CPU)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_PERIOD)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_READ) {
- result += sizeof(u64);
- if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
- result += sizeof(u64);
- if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
- result += sizeof(u64);
- /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
- if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- result += sz;
- } else {
- result += sizeof(u64);
- }
- }
-
- if (type & PERF_SAMPLE_CALLCHAIN) {
- sz = (sample->callchain->nr + 1) * sizeof(u64);
- result += sz;
- }
-
- if (type & PERF_SAMPLE_RAW) {
- result += sizeof(u32);
- result += sample->raw_size;
- }
-
- if (type & PERF_SAMPLE_BRANCH_STACK) {
- sz = sample->branch_stack->nr * sizeof(struct branch_entry);
- sz += sizeof(u64);
- result += sz;
- }
-
- if (type & PERF_SAMPLE_REGS_USER) {
- if (sample->user_regs.abi) {
- result += sizeof(u64);
- sz = hweight64(sample->user_regs.mask) * sizeof(u64);
- result += sz;
- } else {
- result += sizeof(u64);
- }
- }
-
- if (type & PERF_SAMPLE_STACK_USER) {
- sz = sample->user_stack.size;
- result += sizeof(u64);
- if (sz) {
- result += sz;
- result += sizeof(u64);
- }
- }
-
- if (type & PERF_SAMPLE_WEIGHT)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_DATA_SRC)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_TRANSACTION)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_REGS_INTR) {
- if (sample->intr_regs.abi) {
- result += sizeof(u64);
- sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
- result += sz;
- } else {
- result += sizeof(u64);
- }
- }
-
- if (type & PERF_SAMPLE_PHYS_ADDR)
- result += sizeof(u64);
-
- return result;
-}
-
-int perf_event__synthesize_sample(union perf_event *event, u64 type,
- u64 read_format,
- const struct perf_sample *sample)
-{
- __u64 *array;
- size_t sz;
- /*
- * used for cross-endian analysis. See git commit 65014ab3
- * for why this goofiness is needed.
- */
- union u64_swap u;
-
- array = event->sample.array;
-
- if (type & PERF_SAMPLE_IDENTIFIER) {
- *array = sample->id;
- array++;
- }
-
- if (type & PERF_SAMPLE_IP) {
- *array = sample->ip;
- array++;
- }
-
- if (type & PERF_SAMPLE_TID) {
- u.val32[0] = sample->pid;
- u.val32[1] = sample->tid;
- *array = u.val64;
- array++;
- }
-
- if (type & PERF_SAMPLE_TIME) {
- *array = sample->time;
- array++;
- }
-
- if (type & PERF_SAMPLE_ADDR) {
- *array = sample->addr;
- array++;
- }
-
- if (type & PERF_SAMPLE_ID) {
- *array = sample->id;
- array++;
- }
-
- if (type & PERF_SAMPLE_STREAM_ID) {
- *array = sample->stream_id;
- array++;
- }
-
- if (type & PERF_SAMPLE_CPU) {
- u.val32[0] = sample->cpu;
- u.val32[1] = 0;
- *array = u.val64;
- array++;
- }
-
- if (type & PERF_SAMPLE_PERIOD) {
- *array = sample->period;
- array++;
- }
-
- if (type & PERF_SAMPLE_READ) {
- if (read_format & PERF_FORMAT_GROUP)
- *array = sample->read.group.nr;
- else
- *array = sample->read.one.value;
- array++;
-
- if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
- *array = sample->read.time_enabled;
- array++;
- }
-
- if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
- *array = sample->read.time_running;
- array++;
- }
-
- /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
- if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- memcpy(array, sample->read.group.values, sz);
- array = (void *)array + sz;
- } else {
- *array = sample->read.one.id;
- array++;
- }
- }
-
- if (type & PERF_SAMPLE_CALLCHAIN) {
- sz = (sample->callchain->nr + 1) * sizeof(u64);
- memcpy(array, sample->callchain, sz);
- array = (void *)array + sz;
- }
-
- if (type & PERF_SAMPLE_RAW) {
- u.val32[0] = sample->raw_size;
- *array = u.val64;
- array = (void *)array + sizeof(u32);
-
- memcpy(array, sample->raw_data, sample->raw_size);
- array = (void *)array + sample->raw_size;
- }
-
- if (type & PERF_SAMPLE_BRANCH_STACK) {
- sz = sample->branch_stack->nr * sizeof(struct branch_entry);
- sz += sizeof(u64);
- memcpy(array, sample->branch_stack, sz);
- array = (void *)array + sz;
- }
-
- if (type & PERF_SAMPLE_REGS_USER) {
- if (sample->user_regs.abi) {
- *array++ = sample->user_regs.abi;
- sz = hweight64(sample->user_regs.mask) * sizeof(u64);
- memcpy(array, sample->user_regs.regs, sz);
- array = (void *)array + sz;
- } else {
- *array++ = 0;
- }
- }
-
- if (type & PERF_SAMPLE_STACK_USER) {
- sz = sample->user_stack.size;
- *array++ = sz;
- if (sz) {
- memcpy(array, sample->user_stack.data, sz);
- array = (void *)array + sz;
- *array++ = sz;
- }
- }
-
- if (type & PERF_SAMPLE_WEIGHT) {
- *array = sample->weight;
- array++;
- }
-
- if (type & PERF_SAMPLE_DATA_SRC) {
- *array = sample->data_src;
- array++;
- }
-
- if (type & PERF_SAMPLE_TRANSACTION) {
- *array = sample->transaction;
- array++;
- }
-
- if (type & PERF_SAMPLE_REGS_INTR) {
- if (sample->intr_regs.abi) {
- *array++ = sample->intr_regs.abi;
- sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
- memcpy(array, sample->intr_regs.regs, sz);
- array = (void *)array + sz;
- } else {
- *array++ = 0;
- }
- }
-
- if (type & PERF_SAMPLE_PHYS_ADDR) {
- *array = sample->phys_addr;
- array++;
- }
-
- return 0;
-}
-
struct tep_format_field *perf_evsel__field(struct evsel *evsel, const char *name)
{
return tep_find_field(evsel->tp_format, name);
@@ -2811,9 +2368,11 @@ bool perf_evsel__fallback(struct evsel *evsel, int err,
if (evsel->name)
free(evsel->name);
evsel->name = new_name;
- scnprintf(msg, msgsize,
-"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
+ scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying "
+ "to fall back to excluding kernel and hypervisor "
+ " samples", paranoid);
evsel->core.attr.exclude_kernel = 1;
+ evsel->core.attr.exclude_hv = 1;
return true;
}
@@ -2954,7 +2513,7 @@ struct perf_env *perf_evsel__env(struct evsel *evsel)
{
if (evsel && evsel->evlist)
return evsel->evlist->env;
- return NULL;
+ return &perf_env;
}
static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
@@ -2966,7 +2525,7 @@ static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
thread++) {
int fd = FD(evsel, cpu, thread);
- if (perf_evlist__id_add_fd(evlist, evsel,
+ if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
cpu, thread, fd) < 0)
return -1;
}
@@ -2980,7 +2539,7 @@ int perf_evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
struct perf_cpu_map *cpus = evsel->core.cpus;
struct perf_thread_map *threads = evsel->core.threads;
- if (perf_evsel__alloc_id(evsel, cpus->nr, threads->nr))
+ if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr))
return -ENOMEM;
return store_evsel_ids(evsel, evlist);
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 68321d10eb2d..ddc5ee6f6592 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -4,7 +4,6 @@
#include <linux/list.h>
#include <stdbool.h>
-#include <stdio.h>
#include <sys/types.h>
#include <linux/perf_event.h>
#include <linux/types.h>
@@ -13,79 +12,11 @@
#include "symbol_conf.h"
#include <internal/cpumap.h>
-struct addr_location;
-struct evsel;
-union perf_event;
-
-/*
- * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
- * more than one entry in the evlist.
- */
-struct perf_sample_id {
- struct hlist_node node;
- u64 id;
- struct evsel *evsel;
- /*
- * 'idx' will be used for AUX area sampling. A sample will have AUX area
- * data that will be queued for decoding, where there are separate
- * queues for each CPU (per-cpu tracing) or task (per-thread tracing).
- * The sample ID can be used to lookup 'idx' which is effectively the
- * queue number.
- */
- int idx;
- int cpu;
- pid_t tid;
-
- /* Holds total ID period value for PERF_SAMPLE_READ processing. */
- u64 period;
-};
-
+struct bpf_object;
struct cgroup;
-
-/*
- * The 'struct perf_evsel_config_term' is used to pass event
- * specific configuration data to perf_evsel__config routine.
- * It is allocated within event parsing and attached to
- * perf_evsel::config_terms list head.
-*/
-enum term_type {
- PERF_EVSEL__CONFIG_TERM_PERIOD,
- PERF_EVSEL__CONFIG_TERM_FREQ,
- PERF_EVSEL__CONFIG_TERM_TIME,
- PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
- PERF_EVSEL__CONFIG_TERM_STACK_USER,
- PERF_EVSEL__CONFIG_TERM_INHERIT,
- PERF_EVSEL__CONFIG_TERM_MAX_STACK,
- PERF_EVSEL__CONFIG_TERM_MAX_EVENTS,
- PERF_EVSEL__CONFIG_TERM_OVERWRITE,
- PERF_EVSEL__CONFIG_TERM_DRV_CFG,
- PERF_EVSEL__CONFIG_TERM_BRANCH,
- PERF_EVSEL__CONFIG_TERM_PERCORE,
- PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT,
-};
-
-struct perf_evsel_config_term {
- struct list_head list;
- enum term_type type;
- union {
- u64 period;
- u64 freq;
- bool time;
- char *callgraph;
- char *drv_cfg;
- u64 stack_user;
- int max_stack;
- bool inherit;
- bool overwrite;
- char *branch;
- unsigned long max_events;
- bool percore;
- bool aux_output;
- } val;
- bool weak;
-};
-
+struct perf_counts;
struct perf_stat_evsel;
+union perf_event;
typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data);
@@ -94,10 +25,6 @@ enum perf_tool_event {
PERF_TOOL_DURATION_TIME = 1,
};
-struct bpf_object;
-struct perf_counts;
-struct xyarray;
-
/** struct evsel - event selector
*
* @evlist - evlist this evsel is in, if it is in one.
@@ -117,12 +44,9 @@ struct evsel {
struct perf_evsel core;
struct evlist *evlist;
char *filter;
- struct xyarray *sample_id;
- u64 *id;
struct perf_counts *counts;
struct perf_counts *prev_raw_counts;
int idx;
- u32 ids;
unsigned long max_events;
unsigned long nr_events_printed;
char *name;
@@ -146,7 +70,6 @@ struct evsel {
bool disabled;
bool no_aux_samples;
bool immediate;
- bool system_wide;
bool tracking;
bool per_pkg;
bool precise_max;
@@ -179,11 +102,6 @@ struct evsel {
} side_band;
};
-union u64_swap {
- u64 val64;
- u32 val32[2];
-};
-
struct perf_missing_features {
bool sample_id_all;
bool exclude_guest;
@@ -282,8 +200,6 @@ const char *perf_evsel__name(struct evsel *evsel);
const char *perf_evsel__group_name(struct evsel *evsel);
int perf_evsel__group_desc(struct evsel *evsel, char *buf, size_t size);
-int perf_evsel__alloc_id(struct evsel *evsel, int ncpus, int nthreads);
-
void __perf_evsel__set_sample_bit(struct evsel *evsel,
enum perf_event_sample_format bit);
void __perf_evsel__reset_sample_bit(struct evsel *evsel,
@@ -439,37 +355,6 @@ static inline bool perf_evsel__is_clock(struct evsel *evsel)
perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}
-struct perf_attr_details {
- bool freq;
- bool verbose;
- bool event_group;
- bool force;
- bool trace_fields;
-};
-
-int perf_evsel__fprintf(struct evsel *evsel,
- struct perf_attr_details *details, FILE *fp);
-
-#define EVSEL__PRINT_IP (1<<0)
-#define EVSEL__PRINT_SYM (1<<1)
-#define EVSEL__PRINT_DSO (1<<2)
-#define EVSEL__PRINT_SYMOFFSET (1<<3)
-#define EVSEL__PRINT_ONELINE (1<<4)
-#define EVSEL__PRINT_SRCLINE (1<<5)
-#define EVSEL__PRINT_UNKNOWN_AS_ADDR (1<<6)
-#define EVSEL__PRINT_CALLCHAIN_ARROW (1<<7)
-#define EVSEL__PRINT_SKIP_IGNORED (1<<8)
-
-struct callchain_cursor;
-
-int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
- unsigned int print_opts,
- struct callchain_cursor *cursor, FILE *fp);
-
-int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
- int left_alignment, unsigned int print_opts,
- struct callchain_cursor *cursor, FILE *fp);
-
bool perf_evsel__fallback(struct evsel *evsel, int err,
char *msg, size_t msgsize);
int perf_evsel__open_strerror(struct evsel *evsel, struct target *target,
@@ -502,11 +387,6 @@ static inline bool evsel__has_callchain(const struct evsel *evsel)
return (evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0;
}
-typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
-
-int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
- attr__fprintf_f attr__fprintf, void *priv);
-
struct perf_env *perf_evsel__env(struct evsel *evsel);
int perf_evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
diff --git a/tools/perf/util/evsel_config.h b/tools/perf/util/evsel_config.h
new file mode 100644
index 000000000000..8a7648037c18
--- /dev/null
+++ b/tools/perf/util/evsel_config.h
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __PERF_EVSEL_CONFIG_H
+#define __PERF_EVSEL_CONFIG_H 1
+
+#include <linux/types.h>
+#include <stdbool.h>
+
+/*
+ * The 'struct perf_evsel_config_term' is used to pass event
+ * specific configuration data to perf_evsel__config routine.
+ * It is allocated within event parsing and attached to
+ * perf_evsel::config_terms list head.
+*/
+enum evsel_term_type {
+ PERF_EVSEL__CONFIG_TERM_PERIOD,
+ PERF_EVSEL__CONFIG_TERM_FREQ,
+ PERF_EVSEL__CONFIG_TERM_TIME,
+ PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
+ PERF_EVSEL__CONFIG_TERM_STACK_USER,
+ PERF_EVSEL__CONFIG_TERM_INHERIT,
+ PERF_EVSEL__CONFIG_TERM_MAX_STACK,
+ PERF_EVSEL__CONFIG_TERM_MAX_EVENTS,
+ PERF_EVSEL__CONFIG_TERM_OVERWRITE,
+ PERF_EVSEL__CONFIG_TERM_DRV_CFG,
+ PERF_EVSEL__CONFIG_TERM_BRANCH,
+ PERF_EVSEL__CONFIG_TERM_PERCORE,
+ PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT,
+};
+
+struct perf_evsel_config_term {
+ struct list_head list;
+ enum evsel_term_type type;
+ union {
+ u64 period;
+ u64 freq;
+ bool time;
+ char *callgraph;
+ char *drv_cfg;
+ u64 stack_user;
+ int max_stack;
+ bool inherit;
+ bool overwrite;
+ char *branch;
+ unsigned long max_events;
+ bool percore;
+ bool aux_output;
+ } val;
+ bool weak;
+};
+#endif // __PERF_EVSEL_CONFIG_H
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 496fec01f5d1..028df7afb0dc 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -4,6 +4,8 @@
#include <stdbool.h>
#include <traceevent/event-parse.h>
#include "evsel.h"
+#include "util/evsel_fprintf.h"
+#include "util/event.h"
#include "callchain.h"
#include "map.h"
#include "strlist.h"
@@ -101,7 +103,7 @@ out:
int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
unsigned int print_opts, struct callchain_cursor *cursor,
- FILE *fp)
+ struct strlist *bt_stop_list, FILE *fp)
{
int printed = 0;
struct callchain_cursor_node *node;
@@ -174,10 +176,8 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
printed += fprintf(fp, "\n");
/* Add srccode here too? */
- if (symbol_conf.bt_stop_list &&
- node->sym &&
- strlist__has_entry(symbol_conf.bt_stop_list,
- node->sym->name)) {
+ if (bt_stop_list && node->sym &&
+ strlist__has_entry(bt_stop_list, node->sym->name)) {
break;
}
@@ -192,7 +192,7 @@ next:
int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
int left_alignment, unsigned int print_opts,
- struct callchain_cursor *cursor, FILE *fp)
+ struct callchain_cursor *cursor, struct strlist *bt_stop_list, FILE *fp)
{
int printed = 0;
int print_ip = print_opts & EVSEL__PRINT_IP;
@@ -203,8 +203,8 @@ int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
int print_unknown_as_addr = print_opts & EVSEL__PRINT_UNKNOWN_AS_ADDR;
if (cursor != NULL) {
- printed += sample__fprintf_callchain(sample, left_alignment,
- print_opts, cursor, fp);
+ printed += sample__fprintf_callchain(sample, left_alignment, print_opts,
+ cursor, bt_stop_list, fp);
} else {
printed += fprintf(fp, "%-*.*s", left_alignment, left_alignment, " ");
diff --git a/tools/perf/util/evsel_fprintf.h b/tools/perf/util/evsel_fprintf.h
new file mode 100644
index 000000000000..47e6c8456bb1
--- /dev/null
+++ b/tools/perf/util/evsel_fprintf.h
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __PERF_EVSEL_FPRINTF_H
+#define __PERF_EVSEL_FPRINTF_H 1
+
+#include <stdio.h>
+#include <stdbool.h>
+
+struct evsel;
+
+struct perf_attr_details {
+ bool freq;
+ bool verbose;
+ bool event_group;
+ bool force;
+ bool trace_fields;
+};
+
+int perf_evsel__fprintf(struct evsel *evsel,
+ struct perf_attr_details *details, FILE *fp);
+
+#define EVSEL__PRINT_IP (1<<0)
+#define EVSEL__PRINT_SYM (1<<1)
+#define EVSEL__PRINT_DSO (1<<2)
+#define EVSEL__PRINT_SYMOFFSET (1<<3)
+#define EVSEL__PRINT_ONELINE (1<<4)
+#define EVSEL__PRINT_SRCLINE (1<<5)
+#define EVSEL__PRINT_UNKNOWN_AS_ADDR (1<<6)
+#define EVSEL__PRINT_CALLCHAIN_ARROW (1<<7)
+#define EVSEL__PRINT_SKIP_IGNORED (1<<8)
+
+struct addr_location;
+struct perf_event_attr;
+struct perf_sample;
+struct callchain_cursor;
+struct strlist;
+
+int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
+ unsigned int print_opts, struct callchain_cursor *cursor,
+ struct strlist *bt_stop_list, FILE *fp);
+
+int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
+ int left_alignment, unsigned int print_opts,
+ struct callchain_cursor *cursor,
+ struct strlist *bt_stop_list, FILE *fp);
+
+typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
+
+int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
+ attr__fprintf_f attr__fprintf, void *priv);
+#endif // __PERF_EVSEL_FPRINTF_H
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index b72440bf9a79..d4137559be05 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -35,6 +35,9 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
#elif defined(__sparc__)
#define GEN_ELF_ARCH EM_SPARC
#define GEN_ELF_CLASS ELFCLASS32
+#elif defined(__s390x__)
+#define GEN_ELF_ARCH EM_S390
+#define GEN_ELF_CLASS ELFCLASS64
#else
#error "unsupported architecture"
#endif
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index b0c34dda30a0..86d9396cb131 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -25,6 +25,7 @@
#include "dso.h"
#include "evlist.h"
#include "evsel.h"
+#include "util/evsel_fprintf.h"
#include "header.h"
#include "memswap.h"
#include "trace-event.h"
@@ -42,11 +43,12 @@
#include "tool.h"
#include "time-utils.h"
#include "units.h"
-#include "util.h"
+#include "util/util.h" // perf_exe()
#include "cputopo.h"
#include "bpf-event.h"
#include <linux/ctype.h>
+#include <internal/lib.h>
/*
* magic2 = "PERFILE2"
@@ -70,15 +72,6 @@ struct perf_file_attr {
struct perf_file_section ids;
};
-struct feat_fd {
- struct perf_header *ph;
- int fd;
- void *buf; /* Either buf != NULL or fd >= 0 */
- ssize_t offset;
- size_t size;
- struct evsel *events;
-};
-
void perf_header__set_feat(struct perf_header *header, int feat)
{
set_bit(feat, header->adds_features);
@@ -524,7 +517,7 @@ static int write_event_desc(struct feat_fd *ff,
* copy into an nri to be independent of the
* type of ids,
*/
- nri = evsel->ids;
+ nri = evsel->core.ids;
ret = do_write(ff, &nri, sizeof(nri));
if (ret < 0)
return ret;
@@ -538,7 +531,7 @@ static int write_event_desc(struct feat_fd *ff,
/*
* write unique ids for this event
*/
- ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
+ ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64));
if (ret < 0)
return ret;
}
@@ -1081,7 +1074,7 @@ static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 lev
scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
if (sysfs__read_str(file, &cache->map, &len)) {
- zfree(&cache->map);
+ zfree(&cache->size);
zfree(&cache->type);
return -1;
}
@@ -1598,7 +1591,7 @@ static void free_event_desc(struct evsel *events)
for (evsel = events; evsel->core.attr.size; evsel++) {
zfree(&evsel->name);
- zfree(&evsel->id);
+ zfree(&evsel->core.id);
}
free(events);
@@ -1664,8 +1657,8 @@ static struct evsel *read_event_desc(struct feat_fd *ff)
id = calloc(nr, sizeof(*id));
if (!id)
goto error;
- evsel->ids = nr;
- evsel->id = id;
+ evsel->core.ids = nr;
+ evsel->core.id = id;
for (j = 0 ; j < nr; j++) {
if (do_read_u64(ff, id))
@@ -1707,9 +1700,9 @@ static void print_event_desc(struct feat_fd *ff, FILE *fp)
for (evsel = events; evsel->core.attr.size; evsel++) {
fprintf(fp, "# event : name = %s, ", evsel->name);
- if (evsel->ids) {
+ if (evsel->core.ids) {
fprintf(fp, ", id = {");
- for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
+ for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) {
if (j)
fputc(',', fp);
fprintf(fp, " %"PRIu64, *id);
@@ -2823,15 +2816,6 @@ static int process_compressed(struct feat_fd *ff,
return 0;
}
-struct feature_ops {
- int (*write)(struct feat_fd *ff, struct evlist *evlist);
- void (*print)(struct feat_fd *ff, FILE *fp);
- int (*process)(struct feat_fd *ff, void *data);
- const char *name;
- bool full_only;
- bool synthesize;
-};
-
#define FEAT_OPR(n, func, __full_only) \
[HEADER_##n] = { \
.name = __stringify(n), \
@@ -2858,8 +2842,10 @@ struct feature_ops {
#define process_branch_stack NULL
#define process_stat NULL
+// Only used in util/synthetic-events.c
+const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
-static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
+const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPN(TRACING_DATA, tracing_data, false),
FEAT_OPN(BUILD_ID, build_id, false),
FEAT_OPR(HOSTNAME, hostname, false),
@@ -3083,7 +3069,7 @@ int perf_session__write_header(struct perf_session *session,
evlist__for_each_entry(session->evlist, evsel) {
evsel->id_offset = lseek(fd, 0, SEEK_CUR);
- err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
+ err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
if (err < 0) {
pr_debug("failed to write perf header\n");
return err;
@@ -3097,7 +3083,7 @@ int perf_session__write_header(struct perf_session *session,
.attr = evsel->core.attr,
.ids = {
.offset = evsel->id_offset,
- .size = evsel->ids * sizeof(u64),
+ .size = evsel->core.ids * sizeof(u64),
}
};
err = do_write(&ff, &f_attr, sizeof(f_attr));
@@ -3624,7 +3610,7 @@ int perf_session__read_header(struct perf_session *session)
* for allocating the perf_sample_id table we fake 1 cpu and
* hattr->ids threads.
*/
- if (perf_evsel__alloc_id(evsel, 1, nr_ids))
+ if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids))
goto out_delete_evlist;
lseek(fd, f_attr.ids.offset, SEEK_SET);
@@ -3633,7 +3619,7 @@ int perf_session__read_header(struct perf_session *session)
if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
goto out_errno;
- perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
+ perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id);
}
lseek(fd, tmp, SEEK_SET);
@@ -3656,105 +3642,6 @@ out_delete_evlist:
return -ENOMEM;
}
-int perf_event__synthesize_attr(struct perf_tool *tool,
- struct perf_event_attr *attr, u32 ids, u64 *id,
- perf_event__handler_t process)
-{
- union perf_event *ev;
- size_t size;
- int err;
-
- size = sizeof(struct perf_event_attr);
- size = PERF_ALIGN(size, sizeof(u64));
- size += sizeof(struct perf_event_header);
- size += ids * sizeof(u64);
-
- ev = zalloc(size);
-
- if (ev == NULL)
- return -ENOMEM;
-
- ev->attr.attr = *attr;
- memcpy(ev->attr.id, id, ids * sizeof(u64));
-
- ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
- ev->attr.header.size = (u16)size;
-
- if (ev->attr.header.size == size)
- err = process(tool, ev, NULL, NULL);
- else
- err = -E2BIG;
-
- free(ev);
-
- return err;
-}
-
-int perf_event__synthesize_features(struct perf_tool *tool,
- struct perf_session *session,
- struct evlist *evlist,
- perf_event__handler_t process)
-{
- struct perf_header *header = &session->header;
- struct feat_fd ff;
- struct perf_record_header_feature *fe;
- size_t sz, sz_hdr;
- int feat, ret;
-
- sz_hdr = sizeof(fe->header);
- sz = sizeof(union perf_event);
- /* get a nice alignment */
- sz = PERF_ALIGN(sz, page_size);
-
- memset(&ff, 0, sizeof(ff));
-
- ff.buf = malloc(sz);
- if (!ff.buf)
- return -ENOMEM;
-
- ff.size = sz - sz_hdr;
- ff.ph = &session->header;
-
- for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
- if (!feat_ops[feat].synthesize) {
- pr_debug("No record header feature for header :%d\n", feat);
- continue;
- }
-
- ff.offset = sizeof(*fe);
-
- ret = feat_ops[feat].write(&ff, evlist);
- if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
- pr_debug("Error writing feature\n");
- continue;
- }
- /* ff.buf may have changed due to realloc in do_write() */
- fe = ff.buf;
- memset(fe, 0, sizeof(*fe));
-
- fe->feat_id = feat;
- fe->header.type = PERF_RECORD_HEADER_FEATURE;
- fe->header.size = ff.offset;
-
- ret = process(tool, ff.buf, NULL, NULL);
- if (ret) {
- free(ff.buf);
- return ret;
- }
- }
-
- /* Send HEADER_LAST_FEATURE mark. */
- fe = ff.buf;
- fe->feat_id = HEADER_LAST_FEATURE;
- fe->header.type = PERF_RECORD_HEADER_FEATURE;
- fe->header.size = sizeof(*fe);
-
- ret = process(tool, ff.buf, NULL, NULL);
-
- free(ff.buf);
- return ret;
-}
-
int perf_event__process_feature(struct perf_session *session,
union perf_event *event)
{
@@ -3797,113 +3684,6 @@ int perf_event__process_feature(struct perf_session *session,
return 0;
}
-static struct perf_record_event_update *
-event_update_event__new(size_t size, u64 type, u64 id)
-{
- struct perf_record_event_update *ev;
-
- size += sizeof(*ev);
- size = PERF_ALIGN(size, sizeof(u64));
-
- ev = zalloc(size);
- if (ev) {
- ev->header.type = PERF_RECORD_EVENT_UPDATE;
- ev->header.size = (u16)size;
- ev->type = type;
- ev->id = id;
- }
- return ev;
-}
-
-int
-perf_event__synthesize_event_update_unit(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process)
-{
- struct perf_record_event_update *ev;
- size_t size = strlen(evsel->unit);
- int err;
-
- ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
- if (ev == NULL)
- return -ENOMEM;
-
- strlcpy(ev->data, evsel->unit, size + 1);
- err = process(tool, (union perf_event *)ev, NULL, NULL);
- free(ev);
- return err;
-}
-
-int
-perf_event__synthesize_event_update_scale(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process)
-{
- struct perf_record_event_update *ev;
- struct perf_record_event_update_scale *ev_data;
- int err;
-
- ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
- if (ev == NULL)
- return -ENOMEM;
-
- ev_data = (struct perf_record_event_update_scale *)ev->data;
- ev_data->scale = evsel->scale;
- err = process(tool, (union perf_event*) ev, NULL, NULL);
- free(ev);
- return err;
-}
-
-int
-perf_event__synthesize_event_update_name(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process)
-{
- struct perf_record_event_update *ev;
- size_t len = strlen(evsel->name);
- int err;
-
- ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
- if (ev == NULL)
- return -ENOMEM;
-
- strlcpy(ev->data, evsel->name, len + 1);
- err = process(tool, (union perf_event*) ev, NULL, NULL);
- free(ev);
- return err;
-}
-
-int
-perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process)
-{
- size_t size = sizeof(struct perf_record_event_update);
- struct perf_record_event_update *ev;
- int max, err;
- u16 type;
-
- if (!evsel->core.own_cpus)
- return 0;
-
- ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
- if (!ev)
- return -ENOMEM;
-
- ev->header.type = PERF_RECORD_EVENT_UPDATE;
- ev->header.size = (u16)size;
- ev->type = PERF_EVENT_UPDATE__CPUS;
- ev->id = evsel->id[0];
-
- cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
- evsel->core.own_cpus,
- type, max);
-
- err = process(tool, (union perf_event*) ev, NULL, NULL);
- free(ev);
- return err;
-}
-
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
{
struct perf_record_event_update *ev = &event->event_update;
@@ -3943,93 +3723,6 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
return ret;
}
-int perf_event__synthesize_attrs(struct perf_tool *tool,
- struct evlist *evlist,
- perf_event__handler_t process)
-{
- struct evsel *evsel;
- int err = 0;
-
- evlist__for_each_entry(evlist, evsel) {
- err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->ids,
- evsel->id, process);
- if (err) {
- pr_debug("failed to create perf header attribute\n");
- return err;
- }
- }
-
- return err;
-}
-
-static bool has_unit(struct evsel *counter)
-{
- return counter->unit && *counter->unit;
-}
-
-static bool has_scale(struct evsel *counter)
-{
- return counter->scale != 1;
-}
-
-int perf_event__synthesize_extra_attr(struct perf_tool *tool,
- struct evlist *evsel_list,
- perf_event__handler_t process,
- bool is_pipe)
-{
- struct evsel *counter;
- int err;
-
- /*
- * Synthesize other events stuff not carried within
- * attr event - unit, scale, name
- */
- evlist__for_each_entry(evsel_list, counter) {
- if (!counter->supported)
- continue;
-
- /*
- * Synthesize unit and scale only if it's defined.
- */
- if (has_unit(counter)) {
- err = perf_event__synthesize_event_update_unit(tool, counter, process);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel unit.\n");
- return err;
- }
- }
-
- if (has_scale(counter)) {
- err = perf_event__synthesize_event_update_scale(tool, counter, process);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel counter.\n");
- return err;
- }
- }
-
- if (counter->core.own_cpus) {
- err = perf_event__synthesize_event_update_cpus(tool, counter, process);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel cpus.\n");
- return err;
- }
- }
-
- /*
- * Name is needed only for pipe output,
- * perf.data carries event names.
- */
- if (is_pipe) {
- err = perf_event__synthesize_event_update_name(tool, counter, process);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel name.\n");
- return err;
- }
- }
- }
- return 0;
-}
-
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
@@ -4058,11 +3751,11 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
* for allocating the perf_sample_id table we fake 1 cpu and
* hattr->ids threads.
*/
- if (perf_evsel__alloc_id(evsel, 1, n_ids))
+ if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
return -ENOMEM;
for (i = 0; i < n_ids; i++) {
- perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
+ perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
}
return 0;
@@ -4114,55 +3807,6 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
return 0;
}
-int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
- struct evlist *evlist,
- perf_event__handler_t process)
-{
- union perf_event ev;
- struct tracing_data *tdata;
- ssize_t size = 0, aligned_size = 0, padding;
- struct feat_fd ff;
- int err __maybe_unused = 0;
-
- /*
- * We are going to store the size of the data followed
- * by the data contents. Since the fd descriptor is a pipe,
- * we cannot seek back to store the size of the data once
- * we know it. Instead we:
- *
- * - write the tracing data to the temp file
- * - get/write the data size to pipe
- * - write the tracing data from the temp file
- * to the pipe
- */
- tdata = tracing_data_get(&evlist->core.entries, fd, true);
- if (!tdata)
- return -1;
-
- memset(&ev, 0, sizeof(ev));
-
- ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
- size = tdata->size;
- aligned_size = PERF_ALIGN(size, sizeof(u64));
- padding = aligned_size - size;
- ev.tracing_data.header.size = sizeof(ev.tracing_data);
- ev.tracing_data.size = aligned_size;
-
- process(tool, &ev, NULL, NULL);
-
- /*
- * The put function will copy all the tracing data
- * stored in temp file to the pipe.
- */
- tracing_data_put(tdata);
-
- ff = (struct feat_fd){ .fd = fd };
- if (write_padded(&ff, NULL, 0, padding))
- return -1;
-
- return aligned_size;
-}
-
int perf_event__process_tracing_data(struct perf_session *session,
union perf_event *event)
{
@@ -4202,34 +3846,6 @@ int perf_event__process_tracing_data(struct perf_session *session,
return size_read + padding;
}
-int perf_event__synthesize_build_id(struct perf_tool *tool,
- struct dso *pos, u16 misc,
- perf_event__handler_t process,
- struct machine *machine)
-{
- union perf_event ev;
- size_t len;
- int err = 0;
-
- if (!pos->hit)
- return err;
-
- memset(&ev, 0, sizeof(ev));
-
- len = pos->long_name_len + 1;
- len = PERF_ALIGN(len, NAME_ALIGN);
- memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
- ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
- ev.build_id.header.misc = misc;
- ev.build_id.pid = machine->pid;
- ev.build_id.header.size = sizeof(ev.build_id) + len;
- memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
-
- err = process(tool, &ev, NULL, machine);
-
- return err;
-}
-
int perf_event__process_build_id(struct perf_session *session,
union perf_event *event)
{
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 3e48ae3c49b1..ca53a929e9fd 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -5,10 +5,10 @@
#include <linux/stddef.h>
#include <linux/perf_event.h>
#include <sys/types.h>
+#include <stdio.h> // FILE
#include <stdbool.h>
#include <linux/bitmap.h>
#include <linux/types.h>
-#include "event.h"
#include "env.h"
#include "pmu.h"
@@ -92,8 +92,28 @@ struct perf_header {
struct perf_env env;
};
+struct feat_fd {
+ struct perf_header *ph;
+ int fd;
+ void *buf; /* Either buf != NULL or fd >= 0 */
+ ssize_t offset;
+ size_t size;
+ struct evsel *events;
+};
+
+struct perf_header_feature_ops {
+ int (*write)(struct feat_fd *ff, struct evlist *evlist);
+ void (*print)(struct feat_fd *ff, FILE *fp);
+ int (*process)(struct feat_fd *ff, void *data);
+ const char *name;
+ bool full_only;
+ bool synthesize;
+};
+
struct evlist;
struct perf_session;
+struct perf_tool;
+union perf_event;
int perf_session__read_header(struct perf_session *session);
int perf_session__write_header(struct perf_session *session,
@@ -115,54 +135,16 @@ int perf_header__process_sections(struct perf_header *header, int fd,
int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);
-int perf_event__synthesize_features(struct perf_tool *tool,
- struct perf_session *session,
- struct evlist *evlist,
- perf_event__handler_t process);
-
-int perf_event__synthesize_extra_attr(struct perf_tool *tool,
- struct evlist *evsel_list,
- perf_event__handler_t process,
- bool is_pipe);
-
int perf_event__process_feature(struct perf_session *session,
union perf_event *event);
-
-int perf_event__synthesize_attr(struct perf_tool *tool,
- struct perf_event_attr *attr, u32 ids, u64 *id,
- perf_event__handler_t process);
-int perf_event__synthesize_attrs(struct perf_tool *tool,
- struct evlist *evlist,
- perf_event__handler_t process);
-int perf_event__synthesize_event_update_unit(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process);
-int perf_event__synthesize_event_update_scale(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process);
-int perf_event__synthesize_event_update_name(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process);
-int perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process);
int perf_event__process_attr(struct perf_tool *tool, union perf_event *event,
struct evlist **pevlist);
int perf_event__process_event_update(struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist);
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp);
-
-int perf_event__synthesize_tracing_data(struct perf_tool *tool,
- int fd, struct evlist *evlist,
- perf_event__handler_t process);
int perf_event__process_tracing_data(struct perf_session *session,
union perf_event *event);
-
-int perf_event__synthesize_build_id(struct perf_tool *tool,
- struct dso *pos, u16 misc,
- perf_event__handler_t process,
- struct machine *machine);
int perf_event__process_build_id(struct perf_session *session,
union perf_event *event);
bool is_perf_magic(u64 magic);
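For reference, a rough sketch of the two feat_fd setups used by the synthesizing code that is moving out of header.c (both patterns are visible in the removed hunks above; buf, buf_len and fd are caller-provided placeholders):

    /* in-memory mode: do_write() appends to (and may realloc) ff.buf */
    struct feat_fd ff = {
            .ph   = &session->header,
            .buf  = buf,        /* scratch buffer for the synthesized record */
            .size = buf_len,    /* capacity left for do_write() to fill */
    };

    /* file/pipe mode: do_write()/write_padded() go straight to the fd */
    struct feat_fd ff_fd = { .fd = fd };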
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 34803e33dc80..6a186b668303 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -15,6 +15,7 @@ struct addr_location;
struct map_symbol;
struct mem_info;
struct branch_info;
+struct branch_stack;
struct block_info;
struct symbol;
struct ui_progress;
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index aacffa2b0362..34cb380d19a3 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -14,7 +14,6 @@
#include <linux/log2.h>
#include <linux/zalloc.h>
-#include "cpumap.h"
#include "color.h"
#include "evsel.h"
#include "evlist.h"
@@ -29,6 +28,7 @@
#include "auxtrace.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-bts.h"
+#include "util/synthetic-events.h"
#define MAX_TIMESTAMP (~0ULL)
@@ -768,7 +768,7 @@ static int intel_bts_synth_events(struct intel_bts *bts,
int err;
evlist__for_each_entry(evlist, evsel) {
- if (evsel->core.attr.type == bts->pmu_type && evsel->ids) {
+ if (evsel->core.attr.type == bts->pmu_type && evsel->core.ids) {
found = true;
break;
}
@@ -795,7 +795,7 @@ static int intel_bts_synth_events(struct intel_bts *bts,
attr.sample_id_all = evsel->core.attr.sample_id_all;
attr.read_format = evsel->core.attr.read_format;
- id = evsel->id[0] + 1000000000;
+ id = evsel->core.id[0] + 1000000000;
if (!id)
id = 1;
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 9b56fb74bedf..a1c9eb6d4f40 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -33,6 +33,7 @@
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"
+#include "util/synthetic-events.h"
#include "time-utils.h"
#include "../arch/x86/include/uapi/asm/perf_regs.h"
@@ -1704,7 +1705,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
struct intel_pt *pt = ptq->pt;
struct evsel *evsel = pt->pebs_evsel;
u64 sample_type = evsel->core.attr.sample_type;
- u64 id = evsel->id[0];
+ u64 id = evsel->core.id[0];
u8 cpumode;
if (intel_pt_skip_event(pt))
@@ -2719,7 +2720,7 @@ static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
- if (evsel->id && evsel->id[0] == id) {
+ if (evsel->core.id && evsel->core.id[0] == id) {
if (evsel->name)
zfree(&evsel->name);
evsel->name = strdup(name);
@@ -2734,7 +2735,7 @@ static struct evsel *intel_pt_evsel(struct intel_pt *pt,
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
- if (evsel->core.attr.type == pt->pmu_type && evsel->ids)
+ if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
return evsel;
}
@@ -2775,7 +2776,7 @@ static int intel_pt_synth_events(struct intel_pt *pt,
attr.sample_id_all = evsel->core.attr.sample_id_all;
attr.read_format = evsel->core.attr.read_format;
- id = evsel->id[0] + 1000000000;
+ id = evsel->core.id[0] + 1000000000;
if (!id)
id = 1;
@@ -2902,7 +2903,7 @@ static void intel_pt_setup_pebs_events(struct intel_pt *pt)
return;
evlist__for_each_entry(pt->session->evlist, evsel) {
- if (evsel->core.attr.aux_output && evsel->id) {
+ if (evsel->core.attr.aux_output && evsel->core.id) {
pt->sample_pebs = true;
pt->pebs_evsel = evsel;
return;
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index b80f29bfc7bb..e3ccb0ce1938 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -15,7 +15,6 @@
#include <linux/stringify.h>
#include "build-id.h"
-#include "util.h"
#include "event.h"
#include "debug.h"
#include "evlist.h"
@@ -27,7 +26,6 @@
#include "jit.h"
#include "jitdump.h"
#include "genelf.h"
-#include "../builtin.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
@@ -397,7 +395,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
size_t size;
u16 idr_size;
const char *sym;
- uint32_t count;
+ uint64_t count;
int ret, csize, usize;
pid_t pid, tid;
struct {
@@ -420,7 +418,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
return -1;
filename = event->mmap2.filename;
- size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%u.so",
+ size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
jd->dir,
pid,
count);
@@ -531,7 +529,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
return -1;
filename = event->mmap2.filename;
- size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%"PRIu64,
+ size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
jd->dir,
pid,
jr->move.code_index);
@@ -779,7 +777,7 @@ jit_process(struct perf_session *session,
* track sample_type to compute id_all layout
* perf sets the same sample type to all events as of now
*/
- first = perf_evlist__first(session->evlist);
+ first = evlist__first(session->evlist);
jd.sample_type = first->core.attr.sample_type;
*nbytes = 0;
diff --git a/tools/perf/util/kvm-stat.h b/tools/perf/util/kvm-stat.h
index 46913637085b..6f0fa05b62b6 100644
--- a/tools/perf/util/kvm-stat.h
+++ b/tools/perf/util/kvm-stat.h
@@ -2,6 +2,8 @@
#ifndef __PERF_KVM_STAT_H
#define __PERF_KVM_STAT_H
+#ifdef HAVE_KVM_STAT_SUPPORT
+
#include "tool.h"
#include "stat.h"
#include "record.h"
@@ -144,5 +146,7 @@ extern const int decode_str_len;
extern const char *kvm_exit_reason;
extern const char *kvm_entry_trace;
extern const char *kvm_exit_trace;
+#endif /* HAVE_KVM_STAT_SUPPORT */
+extern int kvm_add_default_arch_event(int *argc, const char **argv);
#endif /* __PERF_KVM_STAT_H */
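kvm_add_default_arch_event() is declared outside the HAVE_KVM_STAT_SUPPORT guard, so every build sees the prototype; a plausible architecture-agnostic fallback (arch code would override it where an extra default event is wanted) could look like:

    int __weak kvm_add_default_arch_event(int *argc __maybe_unused,
                                          const char **argv __maybe_unused)
    {
            return 0;   /* nothing to add by default */
    }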
diff --git a/tools/perf/util/libunwind/arm64.c b/tools/perf/util/libunwind/arm64.c
index 66756e6be111..6b4e5a0892f8 100644
--- a/tools/perf/util/libunwind/arm64.c
+++ b/tools/perf/util/libunwind/arm64.c
@@ -22,7 +22,6 @@
#define LIBUNWIND__ARCH_REG_SP PERF_REG_ARM64_SP
#include "unwind.h"
-#include "debug.h"
#include "libunwind-aarch64.h"
#include <../../../../arch/arm64/include/uapi/asm/perf_regs.h>
#include "../../arch/arm64/util/unwind-libunwind.c"
diff --git a/tools/perf/util/libunwind/x86_32.c b/tools/perf/util/libunwind/x86_32.c
index c5e568188e19..21c216c40a3b 100644
--- a/tools/perf/util/libunwind/x86_32.c
+++ b/tools/perf/util/libunwind/x86_32.c
@@ -22,7 +22,6 @@
#define LIBUNWIND__ARCH_REG_SP PERF_REG_X86_SP
#include "unwind.h"
-#include "debug.h"
#include "libunwind-x86.h"
#include <../../../../arch/x86/include/uapi/asm/perf_regs.h>
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 55fb4b3b1157..8b14e4a7f1dc 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -8,6 +8,7 @@
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
+#include <unistd.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
@@ -232,14 +233,14 @@ static int detect_kbuild_dir(char **kbuild_dir)
const char *prefix_dir = "";
const char *suffix_dir = "";
+ /* _UTSNAME_LENGTH is 65 */
+ char release[128];
+
char *autoconf_path;
int err;
if (!test_dir) {
- /* _UTSNAME_LENGTH is 65 */
- char release[128];
-
err = fetch_kernel_version(NULL, release,
sizeof(release));
if (err)
diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c
index 397447066033..39062df02629 100644
--- a/tools/perf/util/lzma.c
+++ b/tools/perf/util/lzma.c
@@ -7,10 +7,10 @@
#include <sys/stat.h>
#include <fcntl.h>
#include "compress.h"
-#include "util.h"
#include "debug.h"
#include <string.h>
#include <unistd.h>
+#include <internal/lib.h>
#define BUFSIZE 8192
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index b4749d3eed08..70a9f8716a4b 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -32,6 +32,7 @@
#include "linux/hash.h"
#include "asm/bug.h"
#include "bpf-event.h"
+#include <internal/lib.h> // page_size
#include <linux/ctype.h>
#include <symbol/kallsyms.h>
@@ -2609,21 +2610,6 @@ int machines__for_each_thread(struct machines *machines,
return rc;
}
-int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
- struct target *target, struct perf_thread_map *threads,
- perf_event__handler_t process, bool data_mmap,
- unsigned int nr_threads_synthesize)
-{
- if (target__has_task(target))
- return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
- else if (target__has_cpu(target))
- return perf_event__synthesize_threads(tool, process,
- machine, data_mmap,
- nr_threads_synthesize);
- /* command specified */
- return 0;
-}
-
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index ffd391a925a6..18e13c0ccd6a 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -6,7 +6,6 @@
#include <linux/rbtree.h>
#include "map_groups.h"
#include "dsos.h"
-#include "event.h"
#include "rwsem.h"
struct addr_location;
@@ -252,20 +251,6 @@ int machines__for_each_thread(struct machines *machines,
int (*fn)(struct thread *thread, void *p),
void *priv);
-int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
- struct target *target, struct perf_thread_map *threads,
- perf_event__handler_t process, bool data_mmap,
- unsigned int nr_threads_synthesize);
-static inline
-int machine__synthesize_threads(struct machine *machine, struct target *target,
- struct perf_thread_map *threads, bool data_mmap,
- unsigned int nr_threads_synthesize)
-{
- return __machine__synthesize_threads(machine, NULL, target, threads,
- perf_event__process, data_mmap,
- nr_threads_synthesize);
-}
-
pid_t machine__get_current_tid(struct machine *machine, int cpu);
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
pid_t tid);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 5b83ed1ebbd6..eec9b282c047 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "symbol.h"
+#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
@@ -850,6 +851,8 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
}
after->start = map->end;
+ after->pgoff += map->end - pos->start;
+ assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
__map_groups__insert(pos->groups, after);
if (verbose >= 2 && !use_browser)
map__fprintf(after, fp);
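Worked through with small numbers: if the existing map pos covers [0x1000, 0x5000) with pgoff 0 and the new map ends at 0x3000, the surviving tail 'after' now starts at 0x3000 and gets pgoff 0 + (0x3000 - 0x1000) = 0x2000. With the usual ip - start + pgoff translation for file-backed maps, both pos->map_ip(pos, 0x3000) and after->map_ip(after, 0x3000) then resolve to file offset 0x2000, which is exactly the invariant the new assert() checks; without the pgoff adjustment the tail would resolve symbols against the wrong part of the file.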
diff --git a/tools/perf/util/memswap.h b/tools/perf/util/memswap.h
index 1e29ff903ca9..2c38e8c2d548 100644
--- a/tools/perf/util/memswap.h
+++ b/tools/perf/util/memswap.h
@@ -2,6 +2,13 @@
#ifndef PERF_MEMSWAP_H_
#define PERF_MEMSWAP_H_
+#include <linux/types.h>
+
+union u64_swap {
+ u64 val64;
+ u32 val32[2];
+};
+
void mem_bswap_64(void *src, int byte_size);
void mem_bswap_32(void *src, int byte_size);
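A typical use of the union, following the pattern in the sample-parsing code: when a u64 slot in a sample really carries two u32 values and the perf.data file was written with the opposite endianness, the blanket 64-bit swap has to be undone before each half is swapped on its own (sketch; 'array', 'swapped', 'pid' and 'tid' stand in for the caller's state):

    union u64_swap u;

    u.val64 = *array;                       /* e.g. the packed PID/TID pair */
    if (swapped) {
            u.val64 = bswap_64(u.val64);    /* undo the whole-u64 swap...   */
            u.val32[0] = bswap_32(u.val32[0]);  /* ...then swap each half   */
            u.val32[1] = bswap_32(u.val32[1]);
    }
    pid = u.val32[0];
    tid = u.val32[1];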
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 33c5b5495482..a35dc57d5995 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -12,6 +12,7 @@
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
+#include <unistd.h> // sysconf()
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
@@ -20,25 +21,25 @@
#include "event.h"
#include "mmap.h"
#include "../perf.h"
-#include "util.h" /* page_size */
+#include <internal/lib.h> /* page_size */
-size_t perf_mmap__mmap_len(struct perf_mmap *map)
+size_t perf_mmap__mmap_len(struct mmap *map)
{
- return map->mask + 1 + page_size;
+ return map->core.mask + 1 + page_size;
}
/* When check_messup is true, 'end' must points to a good entry */
-static union perf_event *perf_mmap__read(struct perf_mmap *map,
+static union perf_event *perf_mmap__read(struct mmap *map,
u64 *startp, u64 end)
{
- unsigned char *data = map->base + page_size;
+ unsigned char *data = map->core.base + page_size;
union perf_event *event = NULL;
int diff = end - *startp;
if (diff >= (int)sizeof(event->header)) {
size_t size;
- event = (union perf_event *)&data[*startp & map->mask];
+ event = (union perf_event *)&data[*startp & map->core.mask];
size = event->header.size;
if (size < sizeof(event->header) || diff < (int)size)
@@ -48,20 +49,20 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
* Event straddles the mmap boundary -- header should always
* be inside due to u64 alignment of output.
*/
- if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
+ if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) {
unsigned int offset = *startp;
unsigned int len = min(sizeof(*event), size), cpy;
- void *dst = map->event_copy;
+ void *dst = map->core.event_copy;
do {
- cpy = min(map->mask + 1 - (offset & map->mask), len);
- memcpy(dst, &data[offset & map->mask], cpy);
+ cpy = min(map->core.mask + 1 - (offset & map->core.mask), len);
+ memcpy(dst, &data[offset & map->core.mask], cpy);
offset += cpy;
dst += cpy;
len -= cpy;
} while (len);
- event = (union perf_event *)map->event_copy;
+ event = (union perf_event *)map->core.event_copy;
}
*startp += size;
@@ -82,55 +83,55 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
* }
* perf_mmap__read_done()
*/
-union perf_event *perf_mmap__read_event(struct perf_mmap *map)
+union perf_event *perf_mmap__read_event(struct mmap *map)
{
union perf_event *event;
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
- if (!refcount_read(&map->refcnt))
+ if (!refcount_read(&map->core.refcnt))
return NULL;
/* non-overwrite doesn't pause the ringbuffer */
- if (!map->overwrite)
- map->end = perf_mmap__read_head(map);
+ if (!map->core.overwrite)
+ map->core.end = perf_mmap__read_head(map);
- event = perf_mmap__read(map, &map->start, map->end);
+ event = perf_mmap__read(map, &map->core.start, map->core.end);
- if (!map->overwrite)
- map->prev = map->start;
+ if (!map->core.overwrite)
+ map->core.prev = map->core.start;
return event;
}
-static bool perf_mmap__empty(struct perf_mmap *map)
+static bool perf_mmap__empty(struct mmap *map)
{
- return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
+ return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base;
}
-void perf_mmap__get(struct perf_mmap *map)
+void perf_mmap__get(struct mmap *map)
{
- refcount_inc(&map->refcnt);
+ refcount_inc(&map->core.refcnt);
}
-void perf_mmap__put(struct perf_mmap *map)
+void perf_mmap__put(struct mmap *map)
{
- BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
+ BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0);
- if (refcount_dec_and_test(&map->refcnt))
+ if (refcount_dec_and_test(&map->core.refcnt))
perf_mmap__munmap(map);
}
-void perf_mmap__consume(struct perf_mmap *map)
+void perf_mmap__consume(struct mmap *map)
{
- if (!map->overwrite) {
- u64 old = map->prev;
+ if (!map->core.overwrite) {
+ u64 old = map->core.prev;
perf_mmap__write_tail(map, old);
}
- if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
+ if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map))
perf_mmap__put(map);
}
@@ -161,13 +162,13 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
}
#ifdef HAVE_AIO_SUPPORT
-static int perf_mmap__aio_enabled(struct perf_mmap *map)
+static int perf_mmap__aio_enabled(struct mmap *map)
{
return map->aio.nr_cblocks > 0;
}
#ifdef HAVE_LIBNUMA_SUPPORT
-static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
@@ -179,7 +180,7 @@ static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
return 0;
}
-static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+static void perf_mmap__aio_free(struct mmap *map, int idx)
{
if (map->aio.data[idx]) {
munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
@@ -187,7 +188,7 @@ static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
}
}
-static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affinity)
+static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
void *data;
size_t mmap_len;
@@ -207,7 +208,7 @@ static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affi
return 0;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
-static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
if (map->aio.data[idx] == NULL)
@@ -216,19 +217,19 @@ static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
return 0;
}
-static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+static void perf_mmap__aio_free(struct mmap *map, int idx)
{
zfree(&(map->aio.data[idx]));
}
-static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int idx __maybe_unused,
+static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
int cpu __maybe_unused, int affinity __maybe_unused)
{
return 0;
}
#endif
-static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
+static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
int delta_max, i, prio, ret;
@@ -256,7 +257,7 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
pr_debug2("failed to allocate data buffer area, error %m");
return -1;
}
- ret = perf_mmap__aio_bind(map, i, map->cpu, mp->affinity);
+ ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
if (ret == -1)
return -1;
/*
@@ -282,7 +283,7 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
return 0;
}
-static void perf_mmap__aio_munmap(struct perf_mmap *map)
+static void perf_mmap__aio_munmap(struct mmap *map)
{
int i;
@@ -294,34 +295,34 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
-static int perf_mmap__aio_enabled(struct perf_mmap *map __maybe_unused)
+static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
return 0;
}
-static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
+static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
struct mmap_params *mp __maybe_unused)
{
return 0;
}
-static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused)
+static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif
-void perf_mmap__munmap(struct perf_mmap *map)
+void perf_mmap__munmap(struct mmap *map)
{
perf_mmap__aio_munmap(map);
if (map->data != NULL) {
munmap(map->data, perf_mmap__mmap_len(map));
map->data = NULL;
}
- if (map->base != NULL) {
- munmap(map->base, perf_mmap__mmap_len(map));
- map->base = NULL;
- map->fd = -1;
- refcount_set(&map->refcnt, 0);
+ if (map->core.base != NULL) {
+ munmap(map->core.base, perf_mmap__mmap_len(map));
+ map->core.base = NULL;
+ map->core.fd = -1;
+ refcount_set(&map->core.refcnt, 0);
}
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
@@ -343,16 +344,16 @@ static void build_node_mask(int node, cpu_set_t *mask)
}
}
-static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp)
+static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
CPU_ZERO(&map->affinity_mask);
if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
- build_node_mask(cpu__get_node(map->cpu), &map->affinity_mask);
+ build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
else if (mp->affinity == PERF_AFFINITY_CPU)
- CPU_SET(map->cpu, &map->affinity_mask);
+ CPU_SET(map->core.cpu, &map->affinity_mask);
}
-int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
+int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
/*
* The last one will be done at perf_mmap__consume(), so that we
@@ -367,23 +368,23 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
* evlist layer can't just drop it when filtering events in
* perf_evlist__filter_pollfd().
*/
- refcount_set(&map->refcnt, 2);
- map->prev = 0;
- map->mask = mp->mask;
- map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
+ refcount_set(&map->core.refcnt, 2);
+ map->core.prev = 0;
+ map->core.mask = mp->mask;
+ map->core.base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
MAP_SHARED, fd, 0);
- if (map->base == MAP_FAILED) {
+ if (map->core.base == MAP_FAILED) {
pr_debug2("failed to mmap perf event ring buffer, error %d\n",
errno);
- map->base = NULL;
+ map->core.base = NULL;
return -1;
}
- map->fd = fd;
- map->cpu = cpu;
+ map->core.fd = fd;
+ map->core.cpu = cpu;
perf_mmap__setup_affinity_mask(map, mp);
- map->flush = mp->flush;
+ map->core.flush = mp->flush;
map->comp_level = mp->comp_level;
@@ -399,7 +400,7 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
}
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
- &mp->auxtrace_mp, map->base, fd))
+ &mp->auxtrace_mp, map->core.base, fd))
return -1;
return perf_mmap__aio_mmap(map, mp);
@@ -440,25 +441,25 @@ static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
/*
* Report the start and end of the available data in ringbuffer
*/
-static int __perf_mmap__read_init(struct perf_mmap *md)
+static int __perf_mmap__read_init(struct mmap *md)
{
u64 head = perf_mmap__read_head(md);
- u64 old = md->prev;
- unsigned char *data = md->base + page_size;
+ u64 old = md->core.prev;
+ unsigned char *data = md->core.base + page_size;
unsigned long size;
- md->start = md->overwrite ? head : old;
- md->end = md->overwrite ? old : head;
+ md->core.start = md->core.overwrite ? head : old;
+ md->core.end = md->core.overwrite ? old : head;
- if ((md->end - md->start) < md->flush)
+ if ((md->core.end - md->core.start) < md->core.flush)
return -EAGAIN;
- size = md->end - md->start;
- if (size > (unsigned long)(md->mask) + 1) {
- if (!md->overwrite) {
+ size = md->core.end - md->core.start;
+ if (size > (unsigned long)(md->core.mask) + 1) {
+ if (!md->core.overwrite) {
WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
- md->prev = head;
+ md->core.prev = head;
perf_mmap__consume(md);
return -EAGAIN;
}
@@ -467,29 +468,29 @@ static int __perf_mmap__read_init(struct perf_mmap *md)
* Backward ring buffer is full. We still have a chance to read
* most of data from it.
*/
- if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
+ if (overwrite_rb_find_range(data, md->core.mask, &md->core.start, &md->core.end))
return -EINVAL;
}
return 0;
}
-int perf_mmap__read_init(struct perf_mmap *map)
+int perf_mmap__read_init(struct mmap *map)
{
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
- if (!refcount_read(&map->refcnt))
+ if (!refcount_read(&map->core.refcnt))
return -ENOENT;
return __perf_mmap__read_init(map);
}
-int perf_mmap__push(struct perf_mmap *md, void *to,
- int push(struct perf_mmap *map, void *to, void *buf, size_t size))
+int perf_mmap__push(struct mmap *md, void *to,
+ int push(struct mmap *map, void *to, void *buf, size_t size))
{
u64 head = perf_mmap__read_head(md);
- unsigned char *data = md->base + page_size;
+ unsigned char *data = md->core.base + page_size;
unsigned long size;
void *buf;
int rc = 0;
@@ -498,12 +499,12 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
if (rc < 0)
return (rc == -EAGAIN) ? 1 : -1;
- size = md->end - md->start;
+ size = md->core.end - md->core.start;
- if ((md->start & md->mask) + size != (md->end & md->mask)) {
- buf = &data[md->start & md->mask];
- size = md->mask + 1 - (md->start & md->mask);
- md->start += size;
+ if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
+ buf = &data[md->core.start & md->core.mask];
+ size = md->core.mask + 1 - (md->core.start & md->core.mask);
+ md->core.start += size;
if (push(md, to, buf, size) < 0) {
rc = -1;
@@ -511,16 +512,16 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
}
}
- buf = &data[md->start & md->mask];
- size = md->end - md->start;
- md->start += size;
+ buf = &data[md->core.start & md->core.mask];
+ size = md->core.end - md->core.start;
+ md->core.start += size;
if (push(md, to, buf, size) < 0) {
rc = -1;
goto out;
}
- md->prev = head;
+ md->core.prev = head;
perf_mmap__consume(md);
out:
return rc;
@@ -529,16 +530,16 @@ out:
/*
* Mandatory for overwrite mode
* The direction of overwrite mode is backward.
- * The last perf_mmap__read() will set tail to map->prev.
- * Need to correct the map->prev to head which is the end of next read.
+ * The last perf_mmap__read() will set tail to map->core.prev.
+ * Need to correct the map->core.prev to head which is the end of next read.
*/
-void perf_mmap__read_done(struct perf_mmap *map)
+void perf_mmap__read_done(struct mmap *map)
{
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
- if (!refcount_read(&map->refcnt))
+ if (!refcount_read(&map->core.refcnt))
return;
- map->prev = perf_mmap__read_head(map);
+ map->core.prev = perf_mmap__read_head(map);
}
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index 3857a49e8f96..e567c1c875bd 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -1,6 +1,7 @@
#ifndef __PERF_MMAP_H
#define __PERF_MMAP_H 1
+#include <internal/mmap.h>
#include <linux/compiler.h>
#include <linux/refcount.h>
#include <linux/types.h>
@@ -15,22 +16,13 @@
struct aiocb;
/**
- * struct perf_mmap - perf's ring buffer mmap details
+ * struct mmap - perf's ring buffer mmap details
*
* @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
*/
-struct perf_mmap {
- void *base;
- int mask;
- int fd;
- int cpu;
- refcount_t refcnt;
- u64 prev;
- u64 start;
- u64 end;
- bool overwrite;
+struct mmap {
+ struct perf_mmap core;
struct auxtrace_mmap auxtrace_mmap;
- char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
#ifdef HAVE_AIO_SUPPORT
struct {
void **data;
@@ -40,71 +32,42 @@ struct perf_mmap {
} aio;
#endif
cpu_set_t affinity_mask;
- u64 flush;
void *data;
int comp_level;
};
-/*
- * State machine of bkw_mmap_state:
- *
- * .________________(forbid)_____________.
- * | V
- * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
- * ^ ^ | ^ |
- * | |__(forbid)____/ |___(forbid)___/|
- * | |
- * \_________________(3)_______________/
- *
- * NOTREADY : Backward ring buffers are not ready
- * RUNNING : Backward ring buffers are recording
- * DATA_PENDING : We are required to collect data from backward ring buffers
- * EMPTY : We have collected data from backward ring buffers.
- *
- * (0): Setup backward ring buffer
- * (1): Pause ring buffers for reading
- * (2): Read from ring buffers
- * (3): Resume ring buffers for recording
- */
-enum bkw_mmap_state {
- BKW_MMAP_NOTREADY,
- BKW_MMAP_RUNNING,
- BKW_MMAP_DATA_PENDING,
- BKW_MMAP_EMPTY,
-};
-
struct mmap_params {
int prot, mask, nr_cblocks, affinity, flush, comp_level;
struct auxtrace_mmap_params auxtrace_mp;
};
-int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);
-void perf_mmap__munmap(struct perf_mmap *map);
+int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu);
+void perf_mmap__munmap(struct mmap *map);
-void perf_mmap__get(struct perf_mmap *map);
-void perf_mmap__put(struct perf_mmap *map);
+void perf_mmap__get(struct mmap *map);
+void perf_mmap__put(struct mmap *map);
-void perf_mmap__consume(struct perf_mmap *map);
+void perf_mmap__consume(struct mmap *map);
-static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
+static inline u64 perf_mmap__read_head(struct mmap *mm)
{
- return ring_buffer_read_head(mm->base);
+ return ring_buffer_read_head(mm->core.base);
}
-static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
+static inline void perf_mmap__write_tail(struct mmap *md, u64 tail)
{
- ring_buffer_write_tail(md->base, tail);
+ ring_buffer_write_tail(md->core.base, tail);
}
-union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
+union perf_event *perf_mmap__read_forward(struct mmap *map);
-union perf_event *perf_mmap__read_event(struct perf_mmap *map);
+union perf_event *perf_mmap__read_event(struct mmap *map);
-int perf_mmap__push(struct perf_mmap *md, void *to,
- int push(struct perf_mmap *map, void *to, void *buf, size_t size));
+int perf_mmap__push(struct mmap *md, void *to,
+ int push(struct mmap *map, void *to, void *buf, size_t size));
-size_t perf_mmap__mmap_len(struct perf_mmap *map);
+size_t perf_mmap__mmap_len(struct mmap *map);
-int perf_mmap__read_init(struct perf_mmap *md);
-void perf_mmap__read_done(struct perf_mmap *map);
+int perf_mmap__read_init(struct mmap *md);
+void perf_mmap__read_done(struct mmap *map);
#endif /*__PERF_MMAP_H */
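For reference, the canonical consumer loop over one of these ring buffers with the API above (sketch; error handling trimmed, 'map' is one entry of the evlist's mmap array):

    union perf_event *event;

    if (perf_mmap__read_init(map) < 0)
            return;                         /* nothing new, or unmapped */

    while ((event = perf_mmap__read_event(map)) != NULL) {
            /* ... deliver the event ... */
            perf_mmap__consume(map);        /* advances the tail in non-overwrite mode */
    }

    perf_mmap__read_done(map);              /* resync prev; required for overwrite mode */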
diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
index 99be15dd2b6b..285d6f30d912 100644
--- a/tools/perf/util/namespaces.c
+++ b/tools/perf/util/namespaces.c
@@ -17,8 +17,26 @@
#include <string.h>
#include <unistd.h>
#include <asm/bug.h>
+#include <linux/kernel.h>
#include <linux/zalloc.h>
+static const char *perf_ns__names[] = {
+ [NET_NS_INDEX] = "net",
+ [UTS_NS_INDEX] = "uts",
+ [IPC_NS_INDEX] = "ipc",
+ [PID_NS_INDEX] = "pid",
+ [USER_NS_INDEX] = "user",
+ [MNT_NS_INDEX] = "mnt",
+ [CGROUP_NS_INDEX] = "cgroup",
+};
+
+const char *perf_ns__name(unsigned int id)
+{
+ if (id >= ARRAY_SIZE(perf_ns__names))
+ return "UNKNOWN";
+ return perf_ns__names[id];
+}
+
struct namespaces *namespaces__new(struct perf_record_namespaces *event)
{
struct namespaces *namespaces;
diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
index 40edef56cb52..4b33f684eddd 100644
--- a/tools/perf/util/namespaces.h
+++ b/tools/perf/util/namespaces.h
@@ -66,4 +66,6 @@ static inline void __nsinfo__zput(struct nsinfo **nsip)
#define nsinfo__zput(nsi) __nsinfo__zput(&nsi)
+const char *perf_ns__name(unsigned int id);
+
#endif /* __PERF_NAMESPACES_H */
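A small sketch of the intended use of the new helper when printing a PERF_RECORD_NAMESPACES event (assuming the libperf perf_record_namespaces layout with nr_namespaces and link_info[]):

    struct perf_record_namespaces *ns = &event->namespaces;
    u64 i;

    for (i = 0; i < ns->nr_namespaces; i++)
            printf("%s: %llu/%llu\n",
                   perf_ns__name((unsigned int)i),
                   (unsigned long long)ns->link_info[i].dev,
                   (unsigned long long)ns->link_info[i].ino);

    /* indices beyond the table in namespaces.c simply print as "UNKNOWN" */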
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 5ec21d21113c..b5e2adef49de 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -30,11 +30,12 @@
#include "parse-events-flex.h"
#include "pmu.h"
#include "thread_map.h"
-#include "cpumap.h"
#include "probe-file.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
#include "metricgroup.h"
+#include "util/evsel_config.h"
+#include "util/event.h"
#define MAX_NAME_LEN 100
@@ -335,7 +336,7 @@ __add_event(struct list_head *list, int *idx,
(*idx)++;
evsel->core.cpus = perf_cpu_map__get(cpus);
evsel->core.own_cpus = perf_cpu_map__get(cpus);
- evsel->system_wide = pmu ? pmu->is_uncore : false;
+ evsel->core.system_wide = pmu ? pmu->is_uncore : false;
evsel->auto_merge_stats = auto_merge_stats;
if (name)
@@ -1936,7 +1937,7 @@ int parse_events(struct evlist *evlist, const char *str,
perf_evlist__splice_list_tail(evlist, &parse_state.list);
evlist->nr_groups += parse_state.nr_groups;
- last = perf_evlist__last(evlist);
+ last = evlist__last(evlist);
last->cmdline_group_boundary = true;
return 0;
@@ -2050,7 +2051,7 @@ foreach_evsel_in_last_glob(struct evlist *evlist,
* So no need to WARN here, let *func do this.
*/
if (evlist->core.nr_entries > 0)
- last = perf_evlist__last(evlist);
+ last = evlist__last(evlist);
do {
err = (*func)(last, arg);
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index f1c36ed1cf36..48126ae4cd13 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -9,13 +9,11 @@
#define YYDEBUG 1
#include <fnmatch.h>
+#include <stdio.h>
#include <linux/compiler.h>
-#include <linux/list.h>
#include <linux/types.h>
-#include "util.h"
#include "pmu.h"
#include "evsel.h"
-#include "debug.h"
#include "parse-events.h"
#include "parse-events-bison.h"
diff --git a/tools/perf/util/perf-hooks.c b/tools/perf/util/perf-hooks.c
index e635c594f773..7a0ab3507bd5 100644
--- a/tools/perf/util/perf-hooks.c
+++ b/tools/perf/util/perf-hooks.c
@@ -12,7 +12,6 @@
#include <setjmp.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include "util/util.h"
#include "util/debug.h"
#include "util/perf-hooks.h"
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
new file mode 100644
index 000000000000..d4ad3f04923a
--- /dev/null
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/perf_event.h>
+#include "util/evsel_fprintf.h"
+
+struct bit_names {
+ int bit;
+ const char *name;
+};
+
+static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
+{
+ bool first_bit = true;
+ int i = 0;
+
+ do {
+ if (value & bits[i].bit) {
+ buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
+ first_bit = false;
+ }
+ } while (bits[++i].name != NULL);
+}
+
+static void __p_sample_type(char *buf, size_t size, u64 value)
+{
+#define bit_name(n) { PERF_SAMPLE_##n, #n }
+ struct bit_names bits[] = {
+ bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
+ bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
+ bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
+ bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
+ bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
+ bit_name(WEIGHT), bit_name(PHYS_ADDR),
+ { .name = NULL, }
+ };
+#undef bit_name
+ __p_bits(buf, size, value, bits);
+}
+
+static void __p_branch_sample_type(char *buf, size_t size, u64 value)
+{
+#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
+ struct bit_names bits[] = {
+ bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
+ bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
+ bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
+ bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
+ bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
+ { .name = NULL, }
+ };
+#undef bit_name
+ __p_bits(buf, size, value, bits);
+}
+
+static void __p_read_format(char *buf, size_t size, u64 value)
+{
+#define bit_name(n) { PERF_FORMAT_##n, #n }
+ struct bit_names bits[] = {
+ bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
+ bit_name(ID), bit_name(GROUP),
+ { .name = NULL, }
+ };
+#undef bit_name
+ __p_bits(buf, size, value, bits);
+}
+
+#define BUF_SIZE 1024
+
+#define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
+#define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
+#define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
+#define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
+#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
+#define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
+
+#define PRINT_ATTRn(_n, _f, _p) \
+do { \
+ if (attr->_f) { \
+ _p(attr->_f); \
+ ret += attr__fprintf(fp, _n, buf, priv);\
+ } \
+} while (0)
+
+#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p)
+
+int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
+ attr__fprintf_f attr__fprintf, void *priv)
+{
+ char buf[BUF_SIZE];
+ int ret = 0;
+
+ PRINT_ATTRf(type, p_unsigned);
+ PRINT_ATTRf(size, p_unsigned);
+ PRINT_ATTRf(config, p_hex);
+ PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
+ PRINT_ATTRf(sample_type, p_sample_type);
+ PRINT_ATTRf(read_format, p_read_format);
+
+ PRINT_ATTRf(disabled, p_unsigned);
+ PRINT_ATTRf(inherit, p_unsigned);
+ PRINT_ATTRf(pinned, p_unsigned);
+ PRINT_ATTRf(exclusive, p_unsigned);
+ PRINT_ATTRf(exclude_user, p_unsigned);
+ PRINT_ATTRf(exclude_kernel, p_unsigned);
+ PRINT_ATTRf(exclude_hv, p_unsigned);
+ PRINT_ATTRf(exclude_idle, p_unsigned);
+ PRINT_ATTRf(mmap, p_unsigned);
+ PRINT_ATTRf(comm, p_unsigned);
+ PRINT_ATTRf(freq, p_unsigned);
+ PRINT_ATTRf(inherit_stat, p_unsigned);
+ PRINT_ATTRf(enable_on_exec, p_unsigned);
+ PRINT_ATTRf(task, p_unsigned);
+ PRINT_ATTRf(watermark, p_unsigned);
+ PRINT_ATTRf(precise_ip, p_unsigned);
+ PRINT_ATTRf(mmap_data, p_unsigned);
+ PRINT_ATTRf(sample_id_all, p_unsigned);
+ PRINT_ATTRf(exclude_host, p_unsigned);
+ PRINT_ATTRf(exclude_guest, p_unsigned);
+ PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
+ PRINT_ATTRf(exclude_callchain_user, p_unsigned);
+ PRINT_ATTRf(mmap2, p_unsigned);
+ PRINT_ATTRf(comm_exec, p_unsigned);
+ PRINT_ATTRf(use_clockid, p_unsigned);
+ PRINT_ATTRf(context_switch, p_unsigned);
+ PRINT_ATTRf(write_backward, p_unsigned);
+ PRINT_ATTRf(namespaces, p_unsigned);
+ PRINT_ATTRf(ksymbol, p_unsigned);
+ PRINT_ATTRf(bpf_event, p_unsigned);
+ PRINT_ATTRf(aux_output, p_unsigned);
+
+ PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
+ PRINT_ATTRf(bp_type, p_unsigned);
+ PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
+ PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
+ PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
+ PRINT_ATTRf(sample_regs_user, p_hex);
+ PRINT_ATTRf(sample_stack_user, p_unsigned);
+ PRINT_ATTRf(clockid, p_signed);
+ PRINT_ATTRf(sample_regs_intr, p_hex);
+ PRINT_ATTRf(aux_watermark, p_unsigned);
+ PRINT_ATTRf(sample_max_stack, p_unsigned);
+
+ return ret;
+}
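The attr__fprintf callback owns the formatting of each name/value pair (only fields with a non-zero value reach it, courtesy of PRINT_ATTRf/PRINT_ATTRn); a minimal sketch of a caller:

    static int attr__fprintf_plain(FILE *fp, const char *name,
                                   const char *val, void *priv __maybe_unused)
    {
            return fprintf(fp, "  %-26s %s\n", name, val);
    }

    ...

    perf_event_attr__fprintf(stdout, &evsel->core.attr,
                             attr__fprintf_plain, NULL);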
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index fb597fa94234..5608da82ad23 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -20,7 +20,6 @@
#include "debug.h"
#include "pmu.h"
#include "parse-events.h"
-#include "cpumap.h"
#include "header.h"
#include "pmu-events/pmu-events.h"
#include "string2.h"
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index b8e0967c5c21..91cab5f669d2 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2331,6 +2331,7 @@ void clear_probe_trace_event(struct probe_trace_event *tev)
}
}
zfree(&tev->args);
+ tev->nargs = 0;
}
struct kprobe_blacklist_node {
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index d13db55a2feb..b659466ea498 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -16,6 +16,7 @@
#include "strlist.h"
#include "strfilter.h"
#include "debug.h"
+#include "build-id.h"
#include "dso.h"
#include "color.h"
#include "symbol.h"
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 505905fc21c5..cd9f95e5044e 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1245,6 +1245,17 @@ static int expand_probe_args(Dwarf_Die *sc_die, struct probe_finder *pf,
return n;
}
+static bool trace_event_finder_overlap(struct trace_event_finder *tf)
+{
+ int i;
+
+ for (i = 0; i < tf->ntevs; i++) {
+ if (tf->pf.addr == tf->tevs[i].point.address)
+ return true;
+ }
+ return false;
+}
+
/* Add a found probe point into trace event list */
static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
{
@@ -1255,6 +1266,14 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
struct perf_probe_arg *args = NULL;
int ret, i;
+ /*
+ * For some reason (e.g. a different column assigned to the same
+ * address), this callback can be called with an address that has
+ * already been processed. Ignore such duplicates.
+ */
+ if (trace_event_finder_overlap(tf))
+ return 0;
+
/* Check number of tevs */
if (tf->ntevs == tf->max_tevs) {
pr_warning("Too many( > %d) probe point found.\n",
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index c6dd478956f1..9af183860fbd 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,6 +10,7 @@ util/python.c
util/cap.c
util/evlist.c
util/evsel.c
+util/perf_event_attr_fprintf.c
util/cpumap.c
util/memswap.c
util/mmap.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 07ca4535e6f7..02460362256d 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -6,17 +6,16 @@
#include <linux/err.h>
#include <perf/cpumap.h>
#include <traceevent/event-parse.h>
-#include "debug.h"
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
#include "event.h"
-#include "cpumap.h"
#include "print_binary.h"
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
-#include "util.h"
+#include "util/env.h"
+#include <internal/lib.h>
#include "../perf-sys.h"
#if PY_MAJOR_VERSION < 3
@@ -56,11 +55,18 @@ int parse_callchain_record(const char *arg __maybe_unused,
}
/*
+ * Add this one here not to drag util/env.c
+ */
+struct perf_env perf_env;
+
+/*
* Support debug printing even though util/debug.c is not linked. That means
* implementing 'verbose' and 'eprintf'.
*/
int verbose;
+int eprintf(int level, int var, const char *fmt, ...);
+
int eprintf(int level, int var, const char *fmt, ...)
{
va_list args;
@@ -884,7 +890,7 @@ static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
- perf_evlist__exit(&pevlist->evlist);
+ evlist__exit(&pevlist->evlist);
Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}
@@ -899,7 +905,7 @@ static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
&pages, &overwrite))
return NULL;
- if (perf_evlist__mmap(evlist, pages) < 0) {
+ if (evlist__mmap(evlist, pages) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
@@ -918,7 +924,7 @@ static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
return NULL;
- n = perf_evlist__poll(evlist, timeout);
+ n = evlist__poll(evlist, timeout);
if (n < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
@@ -935,17 +941,17 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
PyObject *list = PyList_New(0);
int i;
- for (i = 0; i < evlist->pollfd.nr; ++i) {
+ for (i = 0; i < evlist->core.pollfd.nr; ++i) {
PyObject *file;
#if PY_MAJOR_VERSION < 3
- FILE *fp = fdopen(evlist->pollfd.entries[i].fd, "r");
+ FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");
if (fp == NULL)
goto free_list;
file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
- file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1,
+ file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
NULL, NULL, NULL, 0);
#endif
if (file == NULL)
@@ -984,14 +990,14 @@ static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
return Py_BuildValue("i", evlist->core.nr_entries);
}
-static struct perf_mmap *get_md(struct evlist *evlist, int cpu)
+static struct mmap *get_md(struct evlist *evlist, int cpu)
{
int i;
- for (i = 0; i < evlist->nr_mmaps; i++) {
- struct perf_mmap *md = &evlist->mmap[i];
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ struct mmap *md = &evlist->mmap[i];
- if (md->cpu == cpu)
+ if (md->core.cpu == cpu)
return md;
}
@@ -1005,7 +1011,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
union perf_event *event;
int sample_id_all = 1, cpu;
static char *kwlist[] = { "cpu", "sample_id_all", NULL };
- struct perf_mmap *md;
+ struct mmap *md;
int err;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 286fe816c0f3..8579505c29a4 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -2,7 +2,6 @@
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
-#include "cpumap.h"
#include "parse-events.h"
#include <errno.h>
#include <limits.h>
@@ -10,7 +9,6 @@
#include <api/fs/fs.h>
#include <subcmd/parse-options.h>
#include <perf/cpumap.h>
-#include "util.h"
#include "cloexec.h"
#include "record.h"
#include "../perf-sys.h"
@@ -32,7 +30,7 @@ static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
if (parse_events(evlist, str, NULL))
goto out_delete;
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
while (1) {
fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, flags);
@@ -173,7 +171,7 @@ void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
use_sample_identifier = perf_can_sample_identifier();
sample_id = true;
} else if (evlist->core.nr_entries > 1) {
- struct evsel *first = perf_evlist__first(evlist);
+ struct evsel *first = evlist__first(evlist);
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.sample_type == first->core.attr.sample_type)
@@ -278,7 +276,7 @@ bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
if (err)
goto out_delete;
- evsel = perf_evlist__last(temp_evlist);
+ evsel = evlist__last(temp_evlist);
if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
diff --git a/tools/perf/util/rwsem.c b/tools/perf/util/rwsem.c
index 5e52e7baa7b6..f3d29d8ddc99 100644
--- a/tools/perf/util/rwsem.c
+++ b/tools/perf/util/rwsem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "util.h"
#include "rwsem.h"
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index 24a99909d8b3..6785cd87aa4d 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -151,7 +151,6 @@
#include <sys/stat.h>
#include <sys/types.h>
-#include "cpumap.h"
#include "color.h"
#include "evsel.h"
#include "evlist.h"
diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c
index 4d9593e331ea..05b43ab4eeef 100644
--- a/tools/perf/util/s390-sample-raw.c
+++ b/tools/perf/util/s390-sample-raw.c
@@ -22,7 +22,6 @@
#include <asm/byteorder.h>
#include "debug.h"
-#include "util.h"
#include "session.h"
#include "evlist.h"
#include "color.h"
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 666a56e88d8e..5d341efc3237 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -37,7 +37,6 @@
#include "../dso.h"
#include "../callchain.h"
#include "../evsel.h"
-#include "../util.h"
#include "../event.h"
#include "../thread.h"
#include "../comm.h"
@@ -49,7 +48,6 @@
#include "map.h"
#include "symbol.h"
#include "thread_map.h"
-#include "cpumap.h"
#include "print_binary.h"
#include "stat.h"
#include "mem-events.h"
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index e9e4a04f15db..061bb4d6a3f5 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -22,7 +22,6 @@
#include "symbol.h"
#include "session.h"
#include "tool.h"
-#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
@@ -30,10 +29,11 @@
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
-#include "util.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
+#include <internal/lib.h>
+#include <linux/err.h>
#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
@@ -187,6 +187,7 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
struct perf_session *perf_session__new(struct perf_data *data,
bool repipe, struct perf_tool *tool)
{
+ int ret = -ENOMEM;
struct perf_session *session = zalloc(sizeof(*session));
if (!session)
@@ -201,13 +202,15 @@ struct perf_session *perf_session__new(struct perf_data *data,
perf_env__init(&session->header.env);
if (data) {
- if (perf_data__open(data))
+ ret = perf_data__open(data);
+ if (ret < 0)
goto out_delete;
session->data = data;
if (perf_data__is_read(data)) {
- if (perf_session__open(session) < 0)
+ ret = perf_session__open(session);
+ if (ret < 0)
goto out_delete;
/*
@@ -222,8 +225,11 @@ struct perf_session *perf_session__new(struct perf_data *data,
perf_evlist__init_trace_event_sample_raw(session->evlist);
/* Open the directory data. */
- if (data->is_dir && perf_data__open_dir(data))
+ if (data->is_dir) {
+ ret = perf_data__open_dir(data);
+ if (ret)
goto out_delete;
+ }
}
} else {
session->machines.host.env = &perf_env;
@@ -256,7 +262,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
out_delete:
perf_session__delete(session);
out:
- return NULL;
+ return ERR_PTR(ret);
}
static void perf_session__delete_threads(struct perf_session *session)
@@ -1317,6 +1323,7 @@ static int deliver_sample_value(struct evlist *evlist,
struct machine *machine)
{
struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
+ struct evsel *evsel;
if (sid) {
sample->id = v->id;
@@ -1336,7 +1343,8 @@ static int deliver_sample_value(struct evlist *evlist,
if (!sample->period)
return 0;
- return tool->sample(tool, event, sample, sid->evsel, machine);
+ evsel = container_of(sid->evsel, struct evsel, core);
+ return tool->sample(tool, event, sample, evsel, machine);
}
static int deliver_sample_group(struct evlist *evlist,
@@ -2412,73 +2420,3 @@ int perf_event__process_id_index(struct perf_session *session,
}
return 0;
}
-
-int perf_event__synthesize_id_index(struct perf_tool *tool,
- perf_event__handler_t process,
- struct evlist *evlist,
- struct machine *machine)
-{
- union perf_event *ev;
- struct evsel *evsel;
- size_t nr = 0, i = 0, sz, max_nr, n;
- int err;
-
- pr_debug2("Synthesizing id index\n");
-
- max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
- sizeof(struct id_index_entry);
-
- evlist__for_each_entry(evlist, evsel)
- nr += evsel->ids;
-
- n = nr > max_nr ? max_nr : nr;
- sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
- ev = zalloc(sz);
- if (!ev)
- return -ENOMEM;
-
- ev->id_index.header.type = PERF_RECORD_ID_INDEX;
- ev->id_index.header.size = sz;
- ev->id_index.nr = n;
-
- evlist__for_each_entry(evlist, evsel) {
- u32 j;
-
- for (j = 0; j < evsel->ids; j++) {
- struct id_index_entry *e;
- struct perf_sample_id *sid;
-
- if (i >= n) {
- err = process(tool, ev, NULL, machine);
- if (err)
- goto out_err;
- nr -= n;
- i = 0;
- }
-
- e = &ev->id_index.entries[i++];
-
- e->id = evsel->id[j];
-
- sid = perf_evlist__id2sid(evlist, e->id);
- if (!sid) {
- free(ev);
- return -ENOENT;
- }
-
- e->idx = sid->idx;
- e->cpu = sid->cpu;
- e->tid = sid->tid;
- }
- }
-
- sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
- ev->id_index.header.size = sz;
- ev->id_index.nr = nr;
-
- err = process(tool, ev, NULL, machine);
-out_err:
- free(ev);
-
- return err;
-}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index b7aa076ab6fd..b4c9428c18f0 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -138,9 +138,4 @@ int perf_session__deliver_synth_event(struct perf_session *session,
int perf_event__process_id_index(struct perf_session *session,
union perf_event *event);
-int perf_event__synthesize_id_index(struct perf_tool *tool,
- perf_event__handler_t process,
- struct evlist *evlist,
- struct machine *machine);
-
#endif /* __PERF_SESSION_H */
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index a2308eb77681..43d1d410854a 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -2329,7 +2329,7 @@ static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
if (nr > evlist->core.nr_entries)
return NULL;
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
while (--nr > 0)
evsel = perf_evsel__next(evsel);
diff --git a/tools/perf/util/srccode.c b/tools/perf/util/srccode.c
index adfcf1ff464c..d84ed8b6caaa 100644
--- a/tools/perf/util/srccode.c
+++ b/tools/perf/util/srccode.c
@@ -15,7 +15,7 @@
#include <string.h>
#include "srccode.h"
#include "debug.h"
-#include "util.h"
+#include <internal/lib.h> // page_size
#define MAXSRCCACHE (32*1024*1024)
#define MAXSRCFILES 64
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 70c87fdb2a43..2c41d47f6f83 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -738,6 +738,8 @@ static void generic_metric(struct perf_stat_config *config,
char *n, *pn;
expr__ctx_init(&pctx);
+ /* Must be first id entry */
+ expr__add_id(&pctx, name, avg);
for (i = 0; metric_events[i]; i++) {
struct saved_value *v;
struct stats *stats;
@@ -776,8 +778,6 @@ static void generic_metric(struct perf_stat_config *config,
expr__add_id(&pctx, n, avg_stats(stats)*scale);
}
- expr__add_id(&pctx, name, avg);
-
if (!metric_events[i]) {
const char *p = metric_expr;
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 8f1ea27f976f..ebdd130557fb 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -4,6 +4,7 @@
#include <math.h>
#include <string.h>
#include "counts.h"
+#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
@@ -161,6 +162,15 @@ static void perf_evsel__free_prev_raw_counts(struct evsel *evsel)
evsel->prev_raw_counts = NULL;
}
+static void perf_evsel__reset_prev_raw_counts(struct evsel *evsel)
+{
+ if (evsel->prev_raw_counts) {
+ evsel->prev_raw_counts->aggr.val = 0;
+ evsel->prev_raw_counts->aggr.ena = 0;
+ evsel->prev_raw_counts->aggr.run = 0;
+ }
+}
+
static int perf_evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
{
int ncpus = perf_evsel__nr_cpus(evsel);
@@ -211,6 +221,14 @@ void perf_evlist__reset_stats(struct evlist *evlist)
}
}
+void perf_evlist__reset_prev_raw_counts(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel)
+ perf_evsel__reset_prev_raw_counts(evsel);
+}
+
static void zero_per_pkg(struct evsel *counter)
{
if (counter->per_pkg_mask)
@@ -318,7 +336,7 @@ static int process_counter_maps(struct perf_stat_config *config,
int ncpus = perf_evsel__nr_cpus(counter);
int cpu, thread;
- if (counter->system_wide)
+ if (counter->core.system_wide)
nthreads = 1;
for (thread = 0; thread < nthreads; thread++) {
@@ -493,45 +511,3 @@ int create_perf_stat_counter(struct evsel *evsel,
return perf_evsel__open_per_thread(evsel, evsel->core.threads);
}
-
-int perf_stat_synthesize_config(struct perf_stat_config *config,
- struct perf_tool *tool,
- struct evlist *evlist,
- perf_event__handler_t process,
- bool attrs)
-{
- int err;
-
- if (attrs) {
- err = perf_event__synthesize_attrs(tool, evlist, process);
- if (err < 0) {
- pr_err("Couldn't synthesize attrs.\n");
- return err;
- }
- }
-
- err = perf_event__synthesize_extra_attr(tool, evlist, process,
- attrs);
-
- err = perf_event__synthesize_thread_map2(tool, evlist->core.threads,
- process, NULL);
- if (err < 0) {
- pr_err("Couldn't synthesize thread map.\n");
- return err;
- }
-
- err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus,
- process, NULL);
- if (err < 0) {
- pr_err("Couldn't synthesize thread map.\n");
- return err;
- }
-
- err = perf_event__synthesize_stat_config(tool, config, process, NULL);
- if (err < 0) {
- pr_err("Couldn't synthesize config.\n");
- return err;
- }
-
- return 0;
-}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 14fe3e548229..edbeb2f63e8d 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -7,8 +7,9 @@
#include <sys/types.h>
#include <sys/resource.h>
#include "rblist.h"
-#include "event.h"
+struct perf_cpu_map;
+struct perf_stat_config;
struct timespec;
struct stats {
@@ -192,6 +193,7 @@ void perf_stat__collect_metric_expr(struct evlist *);
int perf_evlist__alloc_stats(struct evlist *evlist, bool alloc_raw);
void perf_evlist__free_stats(struct evlist *evlist);
void perf_evlist__reset_stats(struct evlist *evlist);
+void perf_evlist__reset_prev_raw_counts(struct evlist *evlist);
int perf_stat_process_counter(struct perf_stat_config *config,
struct evsel *counter);
@@ -210,11 +212,6 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
int create_perf_stat_counter(struct evsel *evsel,
struct perf_stat_config *config,
struct target *target);
-int perf_stat_synthesize_config(struct perf_stat_config *config,
- struct perf_tool *tool,
- struct evlist *evlist,
- perf_event__handler_t process,
- bool attrs);
void
perf_evlist__print_counters(struct evlist *evlist,
struct perf_stat_config *config,
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 582f4a69cd48..96f941e01681 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -17,11 +17,11 @@
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
+#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include "env.h"
#include "svghelper.h"
-#include "cpumap.h"
static u64 first_time, last_time;
static u64 turbo_frequency, max_freq;
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 9428639872a6..66f4be1df573 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -7,6 +7,7 @@
#include <unistd.h>
#include <inttypes.h>
+#include "dso.h"
#include "map.h"
#include "map_groups.h"
#include "symbol.h"
@@ -16,10 +17,12 @@
#include "machine.h"
#include "vdso.h"
#include "debug.h"
-#include "util.h"
+#include "util/copyfile.h"
#include <linux/ctype.h>
+#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <symbol/kallsyms.h>
+#include <internal/lib.h>
#ifndef EM_AARCH64
#define EM_AARCH64 183 /* ARM 64 bit */
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index 7e2813ec9498..d6e99af263ec 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -1,8 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
#include "dso.h"
#include "symbol.h"
#include "symsrc.h"
-#include "util.h"
#include <errno.h>
#include <unistd.h>
@@ -13,6 +11,7 @@
#include <byteswap.h>
#include <sys/stat.h>
#include <linux/zalloc.h>
+#include <internal/lib.h>
static bool check_need_swap(int file_endian)
{
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 765c75df2904..a8f80e427674 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -19,7 +19,7 @@
#include "build-id.h"
#include "cap.h"
#include "dso.h"
-#include "util.h"
+#include "util.h" // lsdir()
#include "debug.h"
#include "event.h"
#include "machine.h"
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
new file mode 100644
index 000000000000..807cbca403a7
--- /dev/null
+++ b/tools/perf/util/synthetic-events.c
@@ -0,0 +1,1884 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "util/debug.h"
+#include "util/dso.h"
+#include "util/event.h"
+#include "util/evlist.h"
+#include "util/machine.h"
+#include "util/map.h"
+#include "util/map_symbol.h"
+#include "util/branch.h"
+#include "util/memswap.h"
+#include "util/namespaces.h"
+#include "util/session.h"
+#include "util/stat.h"
+#include "util/symbol.h"
+#include "util/synthetic-events.h"
+#include "util/target.h"
+#include "util/time-utils.h"
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/zalloc.h>
+#include <linux/perf_event.h>
+#include <asm/bug.h>
+#include <perf/evsel.h>
+#include <internal/cpumap.h>
+#include <perf/cpumap.h>
+#include <internal/lib.h> // page_size
+#include <internal/threadmap.h>
+#include <perf/threadmap.h>
+#include <symbol/kallsyms.h>
+#include <dirent.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
+#include <api/fs/fs.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
+
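+/*
+ * Time limit (in milliseconds) for parsing a single /proc/<pid>/maps
+ * file; tunable via the --proc-map-timeout option.
+ */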
+unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
+
+int perf_tool__process_synth_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct machine *machine,
+ perf_event__handler_t process)
+{
+ struct perf_sample synth_sample = {
+ .pid = -1,
+ .tid = -1,
+ .time = -1,
+ .stream_id = -1,
+ .cpu = -1,
+ .period = 1,
+ .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
+ };
+
+ return process(tool, event, &synth_sample, machine);
+}
+
+/*
+ * Assumes that the first 4095 bytes of /proc/pid/status contain
+ * the comm, tgid and ppid.
+ */
+static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
+ pid_t *tgid, pid_t *ppid)
+{
+ char filename[PATH_MAX];
+ char bf[4096];
+ int fd;
+ size_t size = 0;
+ ssize_t n;
+ char *name, *tgids, *ppids;
+
+ *tgid = -1;
+ *ppid = -1;
+
+ snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0) {
+ pr_debug("couldn't open %s\n", filename);
+ return -1;
+ }
+
+ n = read(fd, bf, sizeof(bf) - 1);
+ close(fd);
+ if (n <= 0) {
+ pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
+ pid);
+ return -1;
+ }
+ bf[n] = '\0';
+
+ name = strstr(bf, "Name:");
+ tgids = strstr(bf, "Tgid:");
+ ppids = strstr(bf, "PPid:");
+
+ if (name) {
+ char *nl;
+
+ name = skip_spaces(name + 5); /* strlen("Name:") */
+ nl = strchr(name, '\n');
+ if (nl)
+ *nl = '\0';
+
+ size = strlen(name);
+ if (size >= len)
+ size = len - 1;
+ memcpy(comm, name, size);
+ comm[size] = '\0';
+ } else {
+ pr_debug("Name: string not found for pid %d\n", pid);
+ }
+
+ if (tgids) {
+ tgids += 5; /* strlen("Tgid:") */
+ *tgid = atoi(tgids);
+ } else {
+ pr_debug("Tgid: string not found for pid %d\n", pid);
+ }
+
+ if (ppids) {
+ ppids += 5; /* strlen("PPid:") */
+ *ppid = atoi(ppids);
+ } else {
+ pr_debug("PPid: string not found for pid %d\n", pid);
+ }
+
+ return 0;
+}
+
+static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
+ struct machine *machine,
+ pid_t *tgid, pid_t *ppid)
+{
+ size_t size;
+
+ *ppid = -1;
+
+ memset(&event->comm, 0, sizeof(event->comm));
+
+ if (machine__is_host(machine)) {
+ if (perf_event__get_comm_ids(pid, event->comm.comm,
+ sizeof(event->comm.comm),
+ tgid, ppid) != 0) {
+ return -1;
+ }
+ } else {
+ *tgid = machine->pid;
+ }
+
+ if (*tgid < 0)
+ return -1;
+
+ event->comm.pid = *tgid;
+ event->comm.header.type = PERF_RECORD_COMM;
+
+ size = strlen(event->comm.comm) + 1;
+ size = PERF_ALIGN(size, sizeof(u64));
+ memset(event->comm.comm + size, 0, machine->id_hdr_size);
+ event->comm.header.size = (sizeof(event->comm) -
+ (sizeof(event->comm.comm) - size) +
+ machine->id_hdr_size);
+ event->comm.tid = pid;
+
+ return 0;
+}
+
+pid_t perf_event__synthesize_comm(struct perf_tool *tool,
+ union perf_event *event, pid_t pid,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ pid_t tgid, ppid;
+
+ if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
+ return -1;
+
+ if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
+ return -1;
+
+ return tgid;
+}
+
+static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
+ struct perf_ns_link_info *ns_link_info)
+{
+ struct stat64 st;
+ char proc_ns[128];
+
+ sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
+ if (stat64(proc_ns, &st) == 0) {
+ ns_link_info->dev = st.st_dev;
+ ns_link_info->ino = st.st_ino;
+ }
+}
+
+int perf_event__synthesize_namespaces(struct perf_tool *tool,
+ union perf_event *event,
+ pid_t pid, pid_t tgid,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ u32 idx;
+ struct perf_ns_link_info *ns_link_info;
+
+ if (!tool || !tool->namespace_events)
+ return 0;
+
+ memset(&event->namespaces, 0, (sizeof(event->namespaces) +
+ (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
+ machine->id_hdr_size));
+
+ event->namespaces.pid = tgid;
+ event->namespaces.tid = pid;
+
+ event->namespaces.nr_namespaces = NR_NAMESPACES;
+
+ ns_link_info = event->namespaces.link_info;
+
+ for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
+ perf_event__get_ns_link_info(pid, perf_ns__name(idx),
+ &ns_link_info[idx]);
+
+ event->namespaces.header.type = PERF_RECORD_NAMESPACES;
+
+ event->namespaces.header.size = (sizeof(event->namespaces) +
+ (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
+ machine->id_hdr_size);
+
+ if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
+ return -1;
+
+ return 0;
+}
+
+static int perf_event__synthesize_fork(struct perf_tool *tool,
+ union perf_event *event,
+ pid_t pid, pid_t tgid, pid_t ppid,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
+
+ /*
+ * For the main thread set parent to ppid from the status file. For
+ * other threads set parent pid to the main thread, i.e. assume the
+ * main thread spawns all threads in a process.
+ */
+ if (tgid == pid) {
+ event->fork.ppid = ppid;
+ event->fork.ptid = ppid;
+ } else {
+ event->fork.ppid = tgid;
+ event->fork.ptid = tgid;
+ }
+ event->fork.pid = tgid;
+ event->fork.tid = pid;
+ event->fork.header.type = PERF_RECORD_FORK;
+ event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
+
+ event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
+
+ if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
+ return -1;
+
+ return 0;
+}
+
+int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ union perf_event *event,
+ pid_t pid, pid_t tgid,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data)
+{
+ char filename[PATH_MAX];
+ FILE *fp;
+ unsigned long long t;
+ bool truncation = false;
+ unsigned long long timeout = proc_map_timeout * 1000000ULL;
+ int rc = 0;
+ const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
+ int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
+
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
+ machine->root_dir, pid, pid);
+
+ fp = fopen(filename, "r");
+ if (fp == NULL) {
+ /*
+ * We raced with a task exiting - just return:
+ */
+ pr_debug("couldn't open %s\n", filename);
+ return -1;
+ }
+
+ event->header.type = PERF_RECORD_MMAP2;
+ t = rdclock();
+
+ while (1) {
+ char bf[BUFSIZ];
+ char prot[5];
+ char execname[PATH_MAX];
+ char anonstr[] = "//anon";
+ unsigned int ino;
+ size_t size;
+ ssize_t n;
+
+ if (fgets(bf, sizeof(bf), fp) == NULL)
+ break;
+
+ if ((rdclock() - t) > timeout) {
+ pr_warning("Reading %s time out. "
+ "You may want to increase "
+ "the time limit by --proc-map-timeout\n",
+ filename);
+ truncation = true;
+ goto out;
+ }
+
+ /* ensure null termination since stack will be reused. */
+ strcpy(execname, "");
+
+ /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
+ n = sscanf(bf, "%"PRI_lx64"-%"PRI_lx64" %s %"PRI_lx64" %x:%x %u %[^\n]\n",
+ &event->mmap2.start, &event->mmap2.len, prot,
+ &event->mmap2.pgoff, &event->mmap2.maj,
+ &event->mmap2.min,
+ &ino, execname);
+
+ /*
+ * Anon maps don't have the execname.
+ */
+ if (n < 7)
+ continue;
+
+ event->mmap2.ino = (u64)ino;
+
+ /*
+ * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
+ */
+ if (machine__is_host(machine))
+ event->header.misc = PERF_RECORD_MISC_USER;
+ else
+ event->header.misc = PERF_RECORD_MISC_GUEST_USER;
+
+ /* map protection and flags bits */
+ event->mmap2.prot = 0;
+ event->mmap2.flags = 0;
+ if (prot[0] == 'r')
+ event->mmap2.prot |= PROT_READ;
+ if (prot[1] == 'w')
+ event->mmap2.prot |= PROT_WRITE;
+ if (prot[2] == 'x')
+ event->mmap2.prot |= PROT_EXEC;
+
+ if (prot[3] == 's')
+ event->mmap2.flags |= MAP_SHARED;
+ else
+ event->mmap2.flags |= MAP_PRIVATE;
+
+ if (prot[2] != 'x') {
+ if (!mmap_data || prot[0] != 'r')
+ continue;
+
+ event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
+ }
+
+out:
+ if (truncation)
+ event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
+
+ if (!strcmp(execname, ""))
+ strcpy(execname, anonstr);
+
+ if (hugetlbfs_mnt_len &&
+ !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
+ strcpy(execname, anonstr);
+ event->mmap2.flags |= MAP_HUGETLB;
+ }
+
+ size = strlen(execname) + 1;
+ memcpy(event->mmap2.filename, execname, size);
+ size = PERF_ALIGN(size, sizeof(u64));
+ event->mmap2.len -= event->mmap.start;
+ event->mmap2.header.size = (sizeof(event->mmap2) -
+ (sizeof(event->mmap2.filename) - size));
+ memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
+ event->mmap2.header.size += machine->id_hdr_size;
+ event->mmap2.pid = tgid;
+ event->mmap2.tid = pid;
+
+ if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
+ rc = -1;
+ break;
+ }
+
+ if (truncation)
+ break;
+ }
+
+ fclose(fp);
+ return rc;
+}
+
+int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
+ struct machine *machine)
+{
+ int rc = 0;
+ struct map *pos;
+ struct maps *maps = machine__kernel_maps(machine);
+ union perf_event *event = zalloc((sizeof(event->mmap) +
+ machine->id_hdr_size));
+ if (event == NULL) {
+ pr_debug("Not enough memory synthesizing mmap event "
+ "for kernel modules\n");
+ return -1;
+ }
+
+ event->header.type = PERF_RECORD_MMAP;
+
+ /*
+ * kernel uses 0 for user space maps, see kernel/perf_event.c
+ * __perf_event_mmap
+ */
+ if (machine__is_host(machine))
+ event->header.misc = PERF_RECORD_MISC_KERNEL;
+ else
+ event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+
+ for (pos = maps__first(maps); pos; pos = map__next(pos)) {
+ size_t size;
+
+ if (!__map__is_kmodule(pos))
+ continue;
+
+ size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size));
+ memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+ event->mmap.header.size += machine->id_hdr_size;
+ event->mmap.start = pos->start;
+ event->mmap.len = pos->end - pos->start;
+ event->mmap.pid = machine->pid;
+
+ memcpy(event->mmap.filename, pos->dso->long_name,
+ pos->dso->long_name_len + 1);
+ if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
+ rc = -1;
+ break;
+ }
+ }
+
+ free(event);
+ return rc;
+}
+
+static int __event__synthesize_thread(union perf_event *comm_event,
+ union perf_event *mmap_event,
+ union perf_event *fork_event,
+ union perf_event *namespaces_event,
+ pid_t pid, int full, perf_event__handler_t process,
+ struct perf_tool *tool, struct machine *machine, bool mmap_data)
+{
+ char filename[PATH_MAX];
+ DIR *tasks;
+ struct dirent *dirent;
+ pid_t tgid, ppid;
+ int rc = 0;
+
+ /* special case: only send one comm event using passed in pid */
+ if (!full) {
+ tgid = perf_event__synthesize_comm(tool, comm_event, pid,
+ process, machine);
+
+ if (tgid == -1)
+ return -1;
+
+ if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
+ tgid, process, machine) < 0)
+ return -1;
+
+ /*
+ * send mmap only for thread group leader
+ * see thread__init_map_groups
+ */
+ if (pid == tgid &&
+ perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+ process, machine, mmap_data))
+ return -1;
+
+ return 0;
+ }
+
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(filename, sizeof(filename), "%s/proc/%d/task",
+ machine->root_dir, pid);
+
+ tasks = opendir(filename);
+ if (tasks == NULL) {
+ pr_debug("couldn't open %s\n", filename);
+ return 0;
+ }
+
+ while ((dirent = readdir(tasks)) != NULL) {
+ char *end;
+ pid_t _pid;
+
+ _pid = strtol(dirent->d_name, &end, 10);
+ if (*end)
+ continue;
+
+ rc = -1;
+ if (perf_event__prepare_comm(comm_event, _pid, machine,
+ &tgid, &ppid) != 0)
+ break;
+
+ if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
+ ppid, process, machine) < 0)
+ break;
+
+ if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
+ tgid, process, machine) < 0)
+ break;
+
+ /*
+ * Send the prepared comm event
+ */
+ if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
+ break;
+
+ rc = 0;
+ if (_pid == pid) {
+ /* process the parent's maps too */
+ rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+ process, machine, mmap_data);
+ if (rc)
+ break;
+ }
+ }
+
+ closedir(tasks);
+ return rc;
+}
+
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+ struct perf_thread_map *threads,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data)
+{
+ union perf_event *comm_event, *mmap_event, *fork_event;
+ union perf_event *namespaces_event;
+ int err = -1, thread, j;
+
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
+ if (comm_event == NULL)
+ goto out;
+
+ mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
+ if (mmap_event == NULL)
+ goto out_free_comm;
+
+ fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
+ if (fork_event == NULL)
+ goto out_free_mmap;
+
+ namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
+ (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
+ machine->id_hdr_size);
+ if (namespaces_event == NULL)
+ goto out_free_fork;
+
+ err = 0;
+ for (thread = 0; thread < threads->nr; ++thread) {
+ if (__event__synthesize_thread(comm_event, mmap_event,
+ fork_event, namespaces_event,
+ perf_thread_map__pid(threads, thread), 0,
+ process, tool, machine,
+ mmap_data)) {
+ err = -1;
+ break;
+ }
+
+ /*
+ * comm.pid is set to thread group id by
+ * perf_event__synthesize_comm
+ */
+ if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
+ bool need_leader = true;
+
+ /* is thread group leader in thread_map? */
+ for (j = 0; j < threads->nr; ++j) {
+ if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
+ need_leader = false;
+ break;
+ }
+ }
+
+ /* if not, generate events for it */
+ if (need_leader &&
+ __event__synthesize_thread(comm_event, mmap_event,
+ fork_event, namespaces_event,
+ comm_event->comm.pid, 0,
+ process, tool, machine,
+ mmap_data)) {
+ err = -1;
+ break;
+ }
+ }
+ }
+ free(namespaces_event);
+out_free_fork:
+ free(fork_event);
+out_free_mmap:
+ free(mmap_event);
+out_free_comm:
+ free(comm_event);
+out:
+ return err;
+}
+
+static int __perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data,
+ struct dirent **dirent,
+ int start,
+ int num)
+{
+ union perf_event *comm_event, *mmap_event, *fork_event;
+ union perf_event *namespaces_event;
+ int err = -1;
+ char *end;
+ pid_t pid;
+ int i;
+
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
+ if (comm_event == NULL)
+ goto out;
+
+ mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
+ if (mmap_event == NULL)
+ goto out_free_comm;
+
+ fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
+ if (fork_event == NULL)
+ goto out_free_mmap;
+
+ namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
+ (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
+ machine->id_hdr_size);
+ if (namespaces_event == NULL)
+ goto out_free_fork;
+
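+ /* Walk only this worker's slice [start, start + num) of the dirent array. */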
+ for (i = start; i < start + num; i++) {
+ if (!isdigit(dirent[i]->d_name[0]))
+ continue;
+
+ pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
+ /* only interested in proper numerical dirents */
+ if (*end)
+ continue;
+ /*
+ * We may race with exiting thread, so don't stop just because
+ * one thread couldn't be synthesized.
+ */
+ __event__synthesize_thread(comm_event, mmap_event, fork_event,
+ namespaces_event, pid, 1, process,
+ tool, machine, mmap_data);
+ }
+ err = 0;
+
+ free(namespaces_event);
+out_free_fork:
+ free(fork_event);
+out_free_mmap:
+ free(mmap_event);
+out_free_comm:
+ free(comm_event);
+out:
+ return err;
+}
+
+struct synthesize_threads_arg {
+ struct perf_tool *tool;
+ perf_event__handler_t process;
+ struct machine *machine;
+ bool mmap_data;
+ struct dirent **dirent;
+ int num;
+ int start;
+};
+
+static void *synthesize_threads_worker(void *arg)
+{
+ struct synthesize_threads_arg *args = arg;
+
+ __perf_event__synthesize_threads(args->tool, args->process,
+ args->machine, args->mmap_data,
+ args->dirent,
+ args->start, args->num);
+ return NULL;
+}
+
+int perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data,
+ unsigned int nr_threads_synthesize)
+{
+ struct synthesize_threads_arg *args = NULL;
+ pthread_t *synthesize_threads = NULL;
+ char proc_path[PATH_MAX];
+ struct dirent **dirent;
+ int num_per_thread;
+ int m, n, i, j;
+ int thread_nr;
+ int base = 0;
+ int err = -1;
+
+
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
+ n = scandir(proc_path, &dirent, 0, alphasort);
+ if (n < 0)
+ return err;
+
+ if (nr_threads_synthesize == UINT_MAX)
+ thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
+ else
+ thread_nr = nr_threads_synthesize;
+
+ if (thread_nr <= 1) {
+ err = __perf_event__synthesize_threads(tool, process,
+ machine, mmap_data,
+ dirent, base, n);
+ goto free_dirent;
+ }
+ if (thread_nr > n)
+ thread_nr = n;
+
+ synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
+ if (synthesize_threads == NULL)
+ goto free_dirent;
+
+ args = calloc(sizeof(*args), thread_nr);
+ if (args == NULL)
+ goto free_threads;
+
+ num_per_thread = n / thread_nr;
+ m = n % thread_nr;
+ for (i = 0; i < thread_nr; i++) {
+ args[i].tool = tool;
+ args[i].process = process;
+ args[i].machine = machine;
+ args[i].mmap_data = mmap_data;
+ args[i].dirent = dirent;
+ }
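+ /*
+ * Distribute the proc dirents across the workers: the first m threads
+ * get num_per_thread + 1 entries each, the remaining threads get
+ * num_per_thread entries, continuing from 'base'.
+ */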
+ for (i = 0; i < m; i++) {
+ args[i].num = num_per_thread + 1;
+ args[i].start = i * args[i].num;
+ }
+ if (i != 0)
+ base = args[i-1].start + args[i-1].num;
+ for (j = i; j < thread_nr; j++) {
+ args[j].num = num_per_thread;
+ args[j].start = base + (j - i) * args[i].num;
+ }
+
+ for (i = 0; i < thread_nr; i++) {
+ if (pthread_create(&synthesize_threads[i], NULL,
+ synthesize_threads_worker, &args[i]))
+ goto out_join;
+ }
+ err = 0;
+out_join:
+ for (i = 0; i < thread_nr; i++)
+ pthread_join(synthesize_threads[i], NULL);
+ free(args);
+free_threads:
+ free(synthesize_threads);
+free_dirent:
+ for (i = 0; i < n; i++)
+ zfree(&dirent[i]);
+ free(dirent);
+
+ return err;
+}
+
+int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
+ perf_event__handler_t process __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ return 0;
+}
+
+static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ size_t size;
+ struct map *map = machine__kernel_map(machine);
+ struct kmap *kmap;
+ int err;
+ union perf_event *event;
+
+ if (map == NULL)
+ return -1;
+
+ kmap = map__kmap(map);
+ if (!kmap->ref_reloc_sym)
+ return -1;
+
+ /*
+ * We should get this from /sys/kernel/sections/.text, but until that is
+ * available use this, and once it is available keep this as a fallback
+ * for older kernels.
+ */
+ event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
+ if (event == NULL) {
+ pr_debug("Not enough memory synthesizing mmap event "
+ "for kernel modules\n");
+ return -1;
+ }
+
+ if (machine__is_host(machine)) {
+ /*
+ * kernel uses PERF_RECORD_MISC_USER for user space maps,
+ * see kernel/perf_event.c __perf_event_mmap
+ */
+ event->header.misc = PERF_RECORD_MISC_KERNEL;
+ } else {
+ event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+ }
+
+ size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
+ "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
+ size = PERF_ALIGN(size, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
+ event->mmap.pgoff = kmap->ref_reloc_sym->addr;
+ event->mmap.start = map->start;
+ event->mmap.len = map->end - event->mmap.start;
+ event->mmap.pid = machine->pid;
+
+ err = perf_tool__process_synth_event(tool, event, machine, process);
+ free(event);
+
+ return err;
+}
+
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ int err;
+
+ err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
+ if (err < 0)
+ return err;
+
+ return perf_event__synthesize_extra_kmaps(tool, process, machine);
+}
+
+int perf_event__synthesize_thread_map2(struct perf_tool *tool,
+ struct perf_thread_map *threads,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ union perf_event *event;
+ int i, err, size;
+
+ size = sizeof(event->thread_map);
+ size += threads->nr * sizeof(event->thread_map.entries[0]);
+
+ event = zalloc(size);
+ if (!event)
+ return -ENOMEM;
+
+ event->header.type = PERF_RECORD_THREAD_MAP;
+ event->header.size = size;
+ event->thread_map.nr = threads->nr;
+
+ for (i = 0; i < threads->nr; i++) {
+ struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
+ char *comm = perf_thread_map__comm(threads, i);
+
+ if (!comm)
+ comm = (char *) "";
+
+ entry->pid = perf_thread_map__pid(threads, i);
+ strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
+ }
+
+ err = process(tool, event, NULL, machine);
+
+ free(event);
+ return err;
+}
+
+static void synthesize_cpus(struct cpu_map_entries *cpus,
+ struct perf_cpu_map *map)
+{
+ int i;
+
+ cpus->nr = map->nr;
+
+ for (i = 0; i < map->nr; i++)
+ cpus->cpu[i] = map->map[i];
+}
+
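+/*
+ * Encode the cpu map as a bitmask wide enough to hold the largest cpu
+ * number in the map.
+ */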
+static void synthesize_mask(struct perf_record_record_cpu_map *mask,
+ struct perf_cpu_map *map, int max)
+{
+ int i;
+
+ mask->nr = BITS_TO_LONGS(max);
+ mask->long_size = sizeof(long);
+
+ for (i = 0; i < map->nr; i++)
+ set_bit(map->map[i], mask->mask);
+}
+
+static size_t cpus_size(struct perf_cpu_map *map)
+{
+ return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
+}
+
+static size_t mask_size(struct perf_cpu_map *map, int *max)
+{
+ int i;
+
+ *max = 0;
+
+ for (i = 0; i < map->nr; i++) {
+ /* bit position of the cpu is its number + 1 */
+ int bit = map->map[i] + 1;
+
+ if (bit > *max)
+ *max = bit;
+ }
+
+ return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
+}
+
+void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
+{
+ size_t size_cpus, size_mask;
+ bool is_dummy = perf_cpu_map__empty(map);
+
+ /*
+ * Both array and mask data have variable size based
+ * on the number of cpus and their actual values.
+ * The size of the 'struct perf_record_cpu_map_data' is:
+ *
+ * array = size of 'struct cpu_map_entries' +
+ * number of cpus * sizeof(u16)
+ *
+ * mask = size of 'struct perf_record_record_cpu_map' +
+ * maximum cpu bit converted to size of longs
+ *
+ * and finally + the size of 'struct perf_record_cpu_map_data'.
+ */
+ size_cpus = cpus_size(map);
+ size_mask = mask_size(map, max);
+
+ if (is_dummy || (size_cpus < size_mask)) {
+ *size += size_cpus;
+ *type = PERF_CPU_MAP__CPUS;
+ } else {
+ *size += size_mask;
+ *type = PERF_CPU_MAP__MASK;
+ }
+
+ *size += sizeof(struct perf_record_cpu_map_data);
+ *size = PERF_ALIGN(*size, sizeof(u64));
+ return zalloc(*size);
+}
+
+void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
+ u16 type, int max)
+{
+ data->type = type;
+
+ switch (type) {
+ case PERF_CPU_MAP__CPUS:
+ synthesize_cpus((struct cpu_map_entries *) data->data, map);
+ break;
+ case PERF_CPU_MAP__MASK:
+ synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
+ default:
+ break;
+ };
+}
+
+static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
+{
+ size_t size = sizeof(struct perf_record_cpu_map);
+ struct perf_record_cpu_map *event;
+ int max;
+ u16 type;
+
+ event = cpu_map_data__alloc(map, &size, &type, &max);
+ if (!event)
+ return NULL;
+
+ event->header.type = PERF_RECORD_CPU_MAP;
+ event->header.size = size;
+ event->data.type = type;
+
+ cpu_map_data__synthesize(&event->data, map, type, max);
+ return event;
+}
+
+int perf_event__synthesize_cpu_map(struct perf_tool *tool,
+ struct perf_cpu_map *map,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct perf_record_cpu_map *event;
+ int err;
+
+ event = cpu_map_event__new(map);
+ if (!event)
+ return -ENOMEM;
+
+ err = process(tool, (union perf_event *) event, NULL, machine);
+
+ free(event);
+ return err;
+}
+
+int perf_event__synthesize_stat_config(struct perf_tool *tool,
+ struct perf_stat_config *config,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct perf_record_stat_config *event;
+ int size, i = 0, err;
+
+ size = sizeof(*event);
+ size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
+
+ event = zalloc(size);
+ if (!event)
+ return -ENOMEM;
+
+ event->header.type = PERF_RECORD_STAT_CONFIG;
+ event->header.size = size;
+ event->nr = PERF_STAT_CONFIG_TERM__MAX;
+
+#define ADD(__term, __val) \
+ event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \
+ event->data[i].val = __val; \
+ i++;
+
+ ADD(AGGR_MODE, config->aggr_mode)
+ ADD(INTERVAL, config->interval)
+ ADD(SCALE, config->scale)
+
+ WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
+ "stat config terms unbalanced\n");
+#undef ADD
+
+ err = process(tool, (union perf_event *) event, NULL, machine);
+
+ free(event);
+ return err;
+}
+
+int perf_event__synthesize_stat(struct perf_tool *tool,
+ u32 cpu, u32 thread, u64 id,
+ struct perf_counts_values *count,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct perf_record_stat event;
+
+ event.header.type = PERF_RECORD_STAT;
+ event.header.size = sizeof(event);
+ event.header.misc = 0;
+
+ event.id = id;
+ event.cpu = cpu;
+ event.thread = thread;
+ event.val = count->val;
+ event.ena = count->ena;
+ event.run = count->run;
+
+ return process(tool, (union perf_event *) &event, NULL, machine);
+}
+
+int perf_event__synthesize_stat_round(struct perf_tool *tool,
+ u64 evtime, u64 type,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct perf_record_stat_round event;
+
+ event.header.type = PERF_RECORD_STAT_ROUND;
+ event.header.size = sizeof(event);
+ event.header.misc = 0;
+
+ event.time = evtime;
+ event.type = type;
+
+ return process(tool, (union perf_event *) &event, NULL, machine);
+}
+
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
+{
+ size_t sz, result = sizeof(struct perf_record_sample);
+
+ if (type & PERF_SAMPLE_IDENTIFIER)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_IP)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TIME)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_ADDR)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_ID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_STREAM_ID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_CPU)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_PERIOD)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_READ) {
+ result += sizeof(u64);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ result += sizeof(u64);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ result += sizeof(u64);
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ sz = sample->read.group.nr *
+ sizeof(struct sample_read_value);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ sz = (sample->callchain->nr + 1) * sizeof(u64);
+ result += sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ result += sizeof(u32);
+ result += sample->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ sz = sample->branch_stack->nr * sizeof(struct branch_entry);
+ sz += sizeof(u64);
+ result += sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ if (sample->user_regs.abi) {
+ result += sizeof(u64);
+ sz = hweight64(sample->user_regs.mask) * sizeof(u64);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ sz = sample->user_stack.size;
+ result += sizeof(u64);
+ if (sz) {
+ result += sz;
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_DATA_SRC)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TRANSACTION)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_REGS_INTR) {
+ if (sample->intr_regs.abi) {
+ result += sizeof(u64);
+ sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_PHYS_ADDR)
+ result += sizeof(u64);
+
+ return result;
+}
+
+int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
+ const struct perf_sample *sample)
+{
+ __u64 *array;
+ size_t sz;
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union u64_swap u;
+
+ array = event->sample.array;
+
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_IP) {
+ *array = sample->ip;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val32[0] = sample->pid;
+ u.val32[1] = sample->tid;
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ *array = sample->time;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ADDR) {
+ *array = sample->addr;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ *array = sample->stream_id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u.val32[0] = sample->cpu;
+ u.val32[1] = 0;
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ *array = sample->period;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_READ) {
+ if (read_format & PERF_FORMAT_GROUP)
+ *array = sample->read.group.nr;
+ else
+ *array = sample->read.one.value;
+ array++;
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ *array = sample->read.time_enabled;
+ array++;
+ }
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ *array = sample->read.time_running;
+ array++;
+ }
+
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ sz = sample->read.group.nr *
+ sizeof(struct sample_read_value);
+ memcpy(array, sample->read.group.values, sz);
+ array = (void *)array + sz;
+ } else {
+ *array = sample->read.one.id;
+ array++;
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ sz = (sample->callchain->nr + 1) * sizeof(u64);
+ memcpy(array, sample->callchain, sz);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ u.val32[0] = sample->raw_size;
+ *array = u.val64;
+ array = (void *)array + sizeof(u32);
+
+ memcpy(array, sample->raw_data, sample->raw_size);
+ array = (void *)array + sample->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ sz = sample->branch_stack->nr * sizeof(struct branch_entry);
+ sz += sizeof(u64);
+ memcpy(array, sample->branch_stack, sz);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ if (sample->user_regs.abi) {
+ *array++ = sample->user_regs.abi;
+ sz = hweight64(sample->user_regs.mask) * sizeof(u64);
+ memcpy(array, sample->user_regs.regs, sz);
+ array = (void *)array + sz;
+ } else {
+ *array++ = 0;
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ sz = sample->user_stack.size;
+ *array++ = sz;
+ if (sz) {
+ memcpy(array, sample->user_stack.data, sz);
+ array = (void *)array + sz;
+ *array++ = sz;
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT) {
+ *array = sample->weight;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_DATA_SRC) {
+ *array = sample->data_src;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TRANSACTION) {
+ *array = sample->transaction;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_REGS_INTR) {
+ if (sample->intr_regs.abi) {
+ *array++ = sample->intr_regs.abi;
+ sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
+ memcpy(array, sample->intr_regs.regs, sz);
+ array = (void *)array + sz;
+ } else {
+ *array++ = 0;
+ }
+ }
+
+ if (type & PERF_SAMPLE_PHYS_ADDR) {
+ *array = sample->phys_addr;
+ array++;
+ }
+
+ return 0;
+}
+
+int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
+ struct evlist *evlist, struct machine *machine)
+{
+ union perf_event *ev;
+ struct evsel *evsel;
+ size_t nr = 0, i = 0, sz, max_nr, n;
+ int err;
+
+ pr_debug2("Synthesizing id index\n");
+
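+ /*
+ * The event header size is a u16, so all entries may not fit in a
+ * single event; emit the index in chunks of at most max_nr entries.
+ */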
+ max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
+ sizeof(struct id_index_entry);
+
+ evlist__for_each_entry(evlist, evsel)
+ nr += evsel->core.ids;
+
+ n = nr > max_nr ? max_nr : nr;
+ sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
+ ev = zalloc(sz);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->id_index.header.type = PERF_RECORD_ID_INDEX;
+ ev->id_index.header.size = sz;
+ ev->id_index.nr = n;
+
+ evlist__for_each_entry(evlist, evsel) {
+ u32 j;
+
+ for (j = 0; j < evsel->core.ids; j++) {
+ struct id_index_entry *e;
+ struct perf_sample_id *sid;
+
+ if (i >= n) {
+ err = process(tool, ev, NULL, machine);
+ if (err)
+ goto out_err;
+ nr -= n;
+ i = 0;
+ }
+
+ e = &ev->id_index.entries[i++];
+
+ e->id = evsel->core.id[j];
+
+ sid = perf_evlist__id2sid(evlist, e->id);
+ if (!sid) {
+ free(ev);
+ return -ENOENT;
+ }
+
+ e->idx = sid->idx;
+ e->cpu = sid->cpu;
+ e->tid = sid->tid;
+ }
+ }
+
+ sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
+ ev->id_index.header.size = sz;
+ ev->id_index.nr = nr;
+
+ err = process(tool, ev, NULL, machine);
+out_err:
+ free(ev);
+
+ return err;
+}
+
+int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+ struct target *target, struct perf_thread_map *threads,
+ perf_event__handler_t process, bool data_mmap,
+ unsigned int nr_threads_synthesize)
+{
+ if (target__has_task(target))
+ return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
+ else if (target__has_cpu(target))
+ return perf_event__synthesize_threads(tool, process,
+ machine, data_mmap,
+ nr_threads_synthesize);
+ /* command specified */
+ return 0;
+}
+
+int machine__synthesize_threads(struct machine *machine, struct target *target,
+ struct perf_thread_map *threads, bool data_mmap,
+ unsigned int nr_threads_synthesize)
+{
+ return __machine__synthesize_threads(machine, NULL, target, threads,
+ perf_event__process, data_mmap,
+ nr_threads_synthesize);
+}
+
+static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
+{
+ struct perf_record_event_update *ev;
+
+ size += sizeof(*ev);
+ size = PERF_ALIGN(size, sizeof(u64));
+
+ ev = zalloc(size);
+ if (ev) {
+ ev->header.type = PERF_RECORD_EVENT_UPDATE;
+ ev->header.size = (u16)size;
+ ev->type = type;
+ ev->id = id;
+ }
+ return ev;
+}
+
+int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
+ perf_event__handler_t process)
+{
+ size_t size = strlen(evsel->unit);
+ struct perf_record_event_update *ev;
+ int err;
+
+ ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
+ if (ev == NULL)
+ return -ENOMEM;
+
+ strlcpy(ev->data, evsel->unit, size + 1);
+ err = process(tool, (union perf_event *)ev, NULL, NULL);
+ free(ev);
+ return err;
+}
+
+int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
+ perf_event__handler_t process)
+{
+ struct perf_record_event_update *ev;
+ struct perf_record_event_update_scale *ev_data;
+ int err;
+
+ ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
+ if (ev == NULL)
+ return -ENOMEM;
+
+ ev_data = (struct perf_record_event_update_scale *)ev->data;
+ ev_data->scale = evsel->scale;
+ err = process(tool, (union perf_event *)ev, NULL, NULL);
+ free(ev);
+ return err;
+}
+
+int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
+ perf_event__handler_t process)
+{
+ struct perf_record_event_update *ev;
+ size_t len = strlen(evsel->name);
+ int err;
+
+ ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
+ if (ev == NULL)
+ return -ENOMEM;
+
+ strlcpy(ev->data, evsel->name, len + 1);
+ err = process(tool, (union perf_event *)ev, NULL, NULL);
+ free(ev);
+ return err;
+}
+
+int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
+ perf_event__handler_t process)
+{
+ size_t size = sizeof(struct perf_record_event_update);
+ struct perf_record_event_update *ev;
+ int max, err;
+ u16 type;
+
+ if (!evsel->core.own_cpus)
+ return 0;
+
+ ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->header.type = PERF_RECORD_EVENT_UPDATE;
+ ev->header.size = (u16)size;
+ ev->type = PERF_EVENT_UPDATE__CPUS;
+ ev->id = evsel->core.id[0];
+
+ cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
+ evsel->core.own_cpus, type, max);
+
+ err = process(tool, (union perf_event *)ev, NULL, NULL);
+ free(ev);
+ return err;
+}
+
+int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
+ perf_event__handler_t process)
+{
+ struct evsel *evsel;
+ int err = 0;
+
+ evlist__for_each_entry(evlist, evsel) {
+ err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
+ evsel->core.id, process);
+ if (err) {
+ pr_debug("failed to create perf header attribute\n");
+ return err;
+ }
+ }
+
+ return err;
+}
+
+static bool has_unit(struct evsel *evsel)
+{
+ return evsel->unit && *evsel->unit;
+}
+
+static bool has_scale(struct evsel *evsel)
+{
+ return evsel->scale != 1;
+}
+
+int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
+ perf_event__handler_t process, bool is_pipe)
+{
+ struct evsel *evsel;
+ int err;
+
+ /*
+ * Synthesize other event details not carried within
+ * the attr event - unit, scale, name
+ */
+ evlist__for_each_entry(evsel_list, evsel) {
+ if (!evsel->supported)
+ continue;
+
+ /*
+ * Synthesize unit and scale only if it's defined.
+ */
+ if (has_unit(evsel)) {
+ err = perf_event__synthesize_event_update_unit(tool, evsel, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel unit.\n");
+ return err;
+ }
+ }
+
+ if (has_scale(evsel)) {
+ err = perf_event__synthesize_event_update_scale(tool, evsel, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel evsel.\n");
+ return err;
+ }
+ }
+
+ if (evsel->core.own_cpus) {
+ err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel cpus.\n");
+ return err;
+ }
+ }
+
+ /*
+ * Name is needed only for pipe output,
+ * perf.data carries event names.
+ */
+ if (is_pipe) {
+ err = perf_event__synthesize_event_update_name(tool, evsel, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel name.\n");
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
+ u32 ids, u64 *id, perf_event__handler_t process)
+{
+ union perf_event *ev;
+ size_t size;
+ int err;
+
+ size = sizeof(struct perf_event_attr);
+ size = PERF_ALIGN(size, sizeof(u64));
+ size += sizeof(struct perf_event_header);
+ size += ids * sizeof(u64);
+
+ ev = zalloc(size);
+
+ if (ev == NULL)
+ return -ENOMEM;
+
+ ev->attr.attr = *attr;
+ memcpy(ev->attr.id, id, ids * sizeof(u64));
+
+ ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
+ ev->attr.header.size = (u16)size;
+
+ if (ev->attr.header.size == size)
+ err = process(tool, ev, NULL, NULL);
+ else
+ err = -E2BIG;
+
+ free(ev);
+
+ return err;
+}
+
+int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
+ perf_event__handler_t process)
+{
+ union perf_event ev;
+ struct tracing_data *tdata;
+ ssize_t size = 0, aligned_size = 0, padding;
+ struct feat_fd ff;
+
+ /*
+ * We are going to store the size of the data followed
+ * by the data contents. Since the output fd is a pipe,
+ * we cannot seek back to store the size of the data once
+ * we know it. Instead we:
+ *
+ * - write the tracing data to a temp file
+ * - determine the size and write it to the pipe
+ * - copy the tracing data from the temp file
+ * to the pipe
+ */
+ tdata = tracing_data_get(&evlist->core.entries, fd, true);
+ if (!tdata)
+ return -1;
+
+ memset(&ev, 0, sizeof(ev));
+
+ ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
+ size = tdata->size;
+ aligned_size = PERF_ALIGN(size, sizeof(u64));
+ padding = aligned_size - size;
+ ev.tracing_data.header.size = sizeof(ev.tracing_data);
+ ev.tracing_data.size = aligned_size;
+
+ process(tool, &ev, NULL, NULL);
+
+ /*
+ * The put function will copy all the tracing data
+ * stored in temp file to the pipe.
+ */
+ tracing_data_put(tdata);
+
+ ff = (struct feat_fd){ .fd = fd };
+ if (write_padded(&ff, NULL, 0, padding))
+ return -1;
+
+ return aligned_size;
+}
+
+int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
+ perf_event__handler_t process, struct machine *machine)
+{
+ union perf_event ev;
+ size_t len;
+
+ if (!pos->hit)
+ return 0;
+
+ memset(&ev, 0, sizeof(ev));
+
+ len = pos->long_name_len + 1;
+ len = PERF_ALIGN(len, NAME_ALIGN);
+ memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
+ ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
+ ev.build_id.header.misc = misc;
+ ev.build_id.pid = machine->pid;
+ ev.build_id.header.size = sizeof(ev.build_id) + len;
+ memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
+
+ return process(tool, &ev, NULL, machine);
+}
+
+int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
+ struct evlist *evlist, perf_event__handler_t process, bool attrs)
+{
+ int err;
+
+ if (attrs) {
+ err = perf_event__synthesize_attrs(tool, evlist, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize attrs.\n");
+ return err;
+ }
+ }
+
+ err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
+ if (err < 0) {
+ pr_err("Couldn't synthesize extra attrs.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize thread map.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize thread map.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_stat_config(tool, config, process, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize config.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+int __weak perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
+ struct perf_tool *tool __maybe_unused,
+ perf_event__handler_t process __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ return 0;
+}
+
+extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
+
+int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
+ struct evlist *evlist, perf_event__handler_t process)
+{
+ struct perf_header *header = &session->header;
+ struct perf_record_header_feature *fe;
+ struct feat_fd ff;
+ size_t sz, sz_hdr;
+ int feat, ret;
+
+ sz_hdr = sizeof(fe->header);
+ sz = sizeof(union perf_event);
+ /* get a nice alignment */
+ sz = PERF_ALIGN(sz, page_size);
+
+ memset(&ff, 0, sizeof(ff));
+
+ ff.buf = malloc(sz);
+ if (!ff.buf)
+ return -ENOMEM;
+
+ ff.size = sz - sz_hdr;
+ ff.ph = &session->header;
+
+ for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
+ if (!feat_ops[feat].synthesize) {
+ pr_debug("No record header feature for header :%d\n", feat);
+ continue;
+ }
+
+ ff.offset = sizeof(*fe);
+
+ ret = feat_ops[feat].write(&ff, evlist);
+ if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
+ pr_debug("Error writing feature\n");
+ continue;
+ }
+ /* ff.buf may have changed due to realloc in do_write() */
+ fe = ff.buf;
+ memset(fe, 0, sizeof(*fe));
+
+ fe->feat_id = feat;
+ fe->header.type = PERF_RECORD_HEADER_FEATURE;
+ fe->header.size = ff.offset;
+
+ ret = process(tool, ff.buf, NULL, NULL);
+ if (ret) {
+ free(ff.buf);
+ return ret;
+ }
+ }
+
+ /* Send HEADER_LAST_FEATURE mark. */
+ fe = ff.buf;
+ fe->feat_id = HEADER_LAST_FEATURE;
+ fe->header.type = PERF_RECORD_HEADER_FEATURE;
+ fe->header.size = sizeof(*fe);
+
+ ret = process(tool, ff.buf, NULL, NULL);
+
+ free(ff.buf);
+ return ret;
+}
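The attr record built by perf_event__synthesize_attr() above carries its total length in a 16-bit header.size field, so the size is computed in a size_t, stored, and compared back: if the id array makes the record too large for a u16 the event is dropped with -E2BIG rather than silently truncated. Below is a minimal standalone sketch of that overflow check; the struct and helper names are invented for illustration and are not part of the patch.

/*
 * Standalone sketch (not from the patch): the u16 size check used by
 * perf_event__synthesize_attr(). The record size is computed in a
 * size_t, stored in a 16-bit header field, and compared back so an
 * oversized id array is rejected with -E2BIG instead of truncated.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct fake_header { uint32_t type; uint16_t misc, size; };	/* stand-in */

static int attr_record_size(size_t attr_size, size_t nr_ids, size_t *out)
{
	size_t size = ALIGN_UP(attr_size, sizeof(uint64_t));

	size += sizeof(struct fake_header);
	size += nr_ids * sizeof(uint64_t);

	if ((uint16_t)size != size)	/* header.size is only 16 bits wide */
		return -E2BIG;

	*out = size;
	return 0;
}

int main(void)
{
	size_t sz;

	if (!attr_record_size(128, 4, &sz))
		printf("4 ids -> %zu bytes\n", sz);
	if (attr_record_size(128, 100000, &sz) == -E2BIG)
		printf("100000 ids -> rejected with E2BIG\n");
	return 0;
}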
diff --git a/tools/perf/util/synthetic-events.h b/tools/perf/util/synthetic-events.h
new file mode 100644
index 000000000000..baead0cdc381
--- /dev/null
+++ b/tools/perf/util/synthetic-events.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_SYNTHETIC_EVENTS_H
+#define __PERF_SYNTHETIC_EVENTS_H
+
+#include <stdbool.h>
+#include <sys/types.h> // pid_t
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct auxtrace_record;
+struct dso;
+struct evlist;
+struct evsel;
+struct machine;
+struct perf_counts_values;
+struct perf_cpu_map;
+struct perf_event_attr;
+struct perf_event_mmap_page;
+struct perf_sample;
+struct perf_session;
+struct perf_stat_config;
+struct perf_thread_map;
+struct perf_tool;
+struct record_opts;
+struct target;
+
+union perf_event;
+
+typedef int (*perf_event__handler_t)(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine);
+
+int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process);
+int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr, u32 ids, u64 *id, perf_event__handler_t process);
+int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_cpu_map(struct perf_tool *tool, struct perf_cpu_map *cpus, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
+int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
+int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
+int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
+int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list, perf_event__handler_t process, bool is_pipe);
+int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session, struct evlist *evlist, perf_event__handler_t process);
+int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process, struct evlist *evlist, struct machine *machine);
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_mmap_events(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine, bool mmap_data);
+int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_namespaces(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format, const struct perf_sample *sample);
+int perf_event__synthesize_stat_config(struct perf_tool *tool, struct perf_stat_config *config, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process, bool attrs);
+int perf_event__synthesize_stat_round(struct perf_tool *tool, u64 time, u64 type, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_stat(struct perf_tool *tool, u32 cpu, u32 thread, u64 id, struct perf_counts_values *count, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_thread_map2(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_thread_map(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine, bool mmap_data);
+int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine, bool mmap_data, unsigned int nr_threads_synthesize);
+int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist, perf_event__handler_t process);
+int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc, struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
+pid_t perf_event__synthesize_comm(struct perf_tool *tool, union perf_event *event, pid_t pid, perf_event__handler_t process, struct machine *machine);
+
+int perf_tool__process_synth_event(struct perf_tool *tool, union perf_event *event, struct machine *machine, perf_event__handler_t process);
+
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format);
+
+int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+ struct target *target, struct perf_thread_map *threads,
+ perf_event__handler_t process, bool data_mmap,
+ unsigned int nr_threads_synthesize);
+int machine__synthesize_threads(struct machine *machine, struct target *target,
+ struct perf_thread_map *threads, bool data_mmap,
+ unsigned int nr_threads_synthesize);
+
+#ifdef HAVE_AUXTRACE_SUPPORT
+int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr, struct perf_tool *tool,
+ struct perf_session *session, perf_event__handler_t process);
+
+#else // HAVE_AUXTRACE_SUPPORT
+
+#include <errno.h>
+
+static inline int
+perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused,
+ struct perf_tool *tool __maybe_unused,
+ struct perf_session *session __maybe_unused,
+ perf_event__handler_t process __maybe_unused)
+{
+ return -EINVAL;
+}
+#endif // HAVE_AUXTRACE_SUPPORT
+
+#ifdef HAVE_LIBBPF_SUPPORT
+int perf_event__synthesize_bpf_events(struct perf_session *session, perf_event__handler_t process,
+ struct machine *machine, struct record_opts *opts);
+#else // HAVE_LIBBPF_SUPPORT
+static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused,
+ perf_event__handler_t process __maybe_unused,
+ struct machine *machine __maybe_unused,
+ struct record_opts *opts __maybe_unused)
+{
+ return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+#endif // __PERF_SYNTHETIC_EVENTS_H
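For context on how the new header is meant to be consumed: tools that previously reached these prototypes through other headers now include synthetic-events.h directly. The fragment below is a rough, illustrative sketch of a record-style startup sequence built on the declarations above, assuming an already initialized tool/evlist/machine/target; the deliver() handler and the synthesize_startup_state() function are hypothetical, include paths are approximate, and error handling is abbreviated.

#include "util/evlist.h"
#include "util/machine.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/tool.h"

/* hypothetical handler: a real tool would write the synthesized event
 * to its perf.data file or output pipe */
static int deliver(struct perf_tool *tool, union perf_event *event,
		   struct perf_sample *sample, struct machine *machine)
{
	(void)tool; (void)event; (void)sample; (void)machine;
	return 0;
}

static int synthesize_startup_state(struct perf_tool *tool,
				    struct evlist *evlist,
				    struct machine *machine,
				    struct target *target)
{
	int err;

	/* describe the evsels: attr records plus unit/scale/name updates */
	err = perf_event__synthesize_attrs(tool, evlist, deliver);
	if (err)
		return err;
	err = perf_event__synthesize_extra_attr(tool, evlist, deliver,
						/*is_pipe=*/true);
	if (err)
		return err;

	/* describe the kernel mapping and pre-existing threads */
	err = perf_event__synthesize_kernel_mmap(tool, deliver, machine);
	if (err)
		return err;

	return __machine__synthesize_threads(machine, tool, target,
					     evlist->core.threads, deliver,
					     /*data_mmap=*/false,
					     /*nr_threads_synthesize=*/1);
}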
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index 565f7aef7e6c..a3db13dea937 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -6,8 +6,6 @@
*/
#include "target.h"
-#include "util.h"
-#include "debug.h"
#include <pwd.h>
#include <stdio.h>
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index 51fb574998bb..3dce2de9d005 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -5,7 +5,6 @@
* Refactored from builtin-top.c, see that files for further copyright notes.
*/
-#include "cpumap.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
@@ -72,7 +71,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
}
if (top->evlist->core.nr_entries == 1) {
- struct evsel *first = perf_evlist__first(top->evlist);
+ struct evsel *first = evlist__first(top->evlist);
ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
(uint64_t)first->core.attr.sample_period,
opts->freq ? "Hz" : "");
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index d63d542b2cde..086e98ff42a3 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2008,2009, Steven Rostedt <srostedt@redhat.com>
*/
-#include "util.h"
#include <dirent.h>
#include <mntent.h>
#include <stdio.h>
@@ -19,6 +18,7 @@
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
+#include <internal/lib.h> // page_size
#include "trace-event.h"
#include <api/fs/tracing_path.h>
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index b6c0db068be0..8593d3c200c6 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -15,7 +15,6 @@
#include <unistd.h>
#include <errno.h>
-#include "util.h"
#include "trace-event.h"
#include "debug.h"
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
index 01b9d89bf5bf..b3ee651e3d91 100644
--- a/tools/perf/util/trace-event.c
+++ b/tools/perf/util/trace-event.c
@@ -14,7 +14,6 @@
#include <api/fs/fs.h>
#include "trace-event.h"
#include "machine.h"
-#include "util.h"
/*
* global trace_event object used by trace_event__tp_format
diff --git a/tools/perf/util/tsc.h b/tools/perf/util/tsc.h
index e0c3af34ac8d..3c5a632ee57c 100644
--- a/tools/perf/util/tsc.h
+++ b/tools/perf/util/tsc.h
@@ -4,13 +4,12 @@
#include <linux/types.h>
-#include "event.h"
-
struct perf_tsc_conversion {
u16 time_shift;
u32 time_mult;
u64 time_zero;
};
+
struct perf_event_mmap_page;
int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
@@ -20,13 +19,4 @@ u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
u64 rdtsc(void);
-struct perf_event_mmap_page;
-struct perf_tool;
-struct machine;
-
-int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc,
- struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine);
-
-#endif
+#endif // __PERF_TSC_H
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 9ece188ae48a..15f6e46d7124 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -17,7 +17,6 @@
#include "event.h"
#include "perf_regs.h"
#include "callchain.h"
-#include "util.h"
static char *debuginfo_path;
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index ebdbb056510c..1800887b2255 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -37,7 +37,6 @@
#include "unwind.h"
#include "map.h"
#include "symbol.h"
-#include "util.h"
#include "debug.h"
#include "asm/bug.h"
#include "dso.h"
diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c
index 3949a60b00ae..196438ee4c9d 100644
--- a/tools/perf/util/usage.c
+++ b/tools/perf/util/usage.c
@@ -8,7 +8,6 @@
* Copyright (C) Linus Torvalds, 2005
*/
#include "util.h"
-#include "debug.h"
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 32322a20a68b..5eda6e19c947 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -2,9 +2,7 @@
#include "util.h"
#include "debug.h"
#include "event.h"
-#include "namespaces.h"
#include <api/fs/fs.h>
-#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <dirent.h>
@@ -41,8 +39,6 @@ void perf_set_multithreaded(void)
perf_singlethreaded = false;
}
-unsigned int page_size;
-
int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK;
@@ -234,138 +230,6 @@ out:
return list;
}
-static int slow_copyfile(const char *from, const char *to, struct nsinfo *nsi)
-{
- int err = -1;
- char *line = NULL;
- size_t n;
- FILE *from_fp, *to_fp;
- struct nscookie nsc;
-
- nsinfo__mountns_enter(nsi, &nsc);
- from_fp = fopen(from, "r");
- nsinfo__mountns_exit(&nsc);
- if (from_fp == NULL)
- goto out;
-
- to_fp = fopen(to, "w");
- if (to_fp == NULL)
- goto out_fclose_from;
-
- while (getline(&line, &n, from_fp) > 0)
- if (fputs(line, to_fp) == EOF)
- goto out_fclose_to;
- err = 0;
-out_fclose_to:
- fclose(to_fp);
- free(line);
-out_fclose_from:
- fclose(from_fp);
-out:
- return err;
-}
-
-int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
-{
- void *ptr;
- loff_t pgoff;
-
- pgoff = off_in & ~(page_size - 1);
- off_in -= pgoff;
-
- ptr = mmap(NULL, off_in + size, PROT_READ, MAP_PRIVATE, ifd, pgoff);
- if (ptr == MAP_FAILED)
- return -1;
-
- while (size) {
- ssize_t ret = pwrite(ofd, ptr + off_in, size, off_out);
- if (ret < 0 && errno == EINTR)
- continue;
- if (ret <= 0)
- break;
-
- size -= ret;
- off_in += ret;
- off_out += ret;
- }
- munmap(ptr, off_in + size);
-
- return size ? -1 : 0;
-}
-
-static int copyfile_mode_ns(const char *from, const char *to, mode_t mode,
- struct nsinfo *nsi)
-{
- int fromfd, tofd;
- struct stat st;
- int err;
- char *tmp = NULL, *ptr = NULL;
- struct nscookie nsc;
-
- nsinfo__mountns_enter(nsi, &nsc);
- err = stat(from, &st);
- nsinfo__mountns_exit(&nsc);
- if (err)
- goto out;
- err = -1;
-
- /* extra 'x' at the end is to reserve space for '.' */
- if (asprintf(&tmp, "%s.XXXXXXx", to) < 0) {
- tmp = NULL;
- goto out;
- }
- ptr = strrchr(tmp, '/');
- if (!ptr)
- goto out;
- ptr = memmove(ptr + 1, ptr, strlen(ptr) - 1);
- *ptr = '.';
-
- tofd = mkstemp(tmp);
- if (tofd < 0)
- goto out;
-
- if (fchmod(tofd, mode))
- goto out_close_to;
-
- if (st.st_size == 0) { /* /proc? do it slowly... */
- err = slow_copyfile(from, tmp, nsi);
- goto out_close_to;
- }
-
- nsinfo__mountns_enter(nsi, &nsc);
- fromfd = open(from, O_RDONLY);
- nsinfo__mountns_exit(&nsc);
- if (fromfd < 0)
- goto out_close_to;
-
- err = copyfile_offset(fromfd, 0, tofd, 0, st.st_size);
-
- close(fromfd);
-out_close_to:
- close(tofd);
- if (!err)
- err = link(tmp, to);
- unlink(tmp);
-out:
- free(tmp);
- return err;
-}
-
-int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi)
-{
- return copyfile_mode_ns(from, to, 0755, nsi);
-}
-
-int copyfile_mode(const char *from, const char *to, mode_t mode)
-{
- return copyfile_mode_ns(from, to, mode, NULL);
-}
-
-int copyfile(const char *from, const char *to)
-{
- return copyfile_mode(from, to, 0755);
-}
-
size_t hex_width(u64 v)
{
size_t n = 1;
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 45a5c6f20197..9969b8b46f7c 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -11,14 +11,12 @@
#include <stddef.h>
#include <linux/compiler.h>
#include <sys/types.h>
-#include <internal/lib.h>
/* General helper functions */
void usage(const char *err) __noreturn;
void die(const char *err, ...) __noreturn __printf(1, 2);
struct dirent;
-struct nsinfo;
struct strlist;
int mkdir_p(char *path, mode_t mode);
@@ -26,15 +24,9 @@ int rm_rf(const char *path);
int rm_rf_perf_data(const char *path);
struct strlist *lsdir(const char *name, bool (*filter)(const char *, struct dirent *));
bool lsdir_no_dot_filter(const char *name, struct dirent *d);
-int copyfile(const char *from, const char *to);
-int copyfile_mode(const char *from, const char *to, mode_t mode);
-int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi);
-int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size);
size_t hex_width(u64 v);
-extern unsigned int page_size;
-
int sysctl__max_stack(void);
int fetch_kernel_version(unsigned int *puint,
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index e5e6599603f4..ba4b4395f35d 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -11,7 +11,7 @@
#include "vdso.h"
#include "dso.h"
-#include "util.h"
+#include <internal/lib.h>
#include "map.h"
#include "symbol.h"
#include "machine.h"
diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c
index 59d456f716e9..78d2297c1b67 100644
--- a/tools/perf/util/zlib.c
+++ b/tools/perf/util/zlib.c
@@ -7,11 +7,9 @@
#include <sys/mman.h>
#include <zlib.h>
#include <linux/compiler.h>
+#include <internal/lib.h>
#include "util/compress.h"
-#include "util/util.h"
-#include "util/debug.h"
-
#define CHUNK_SIZE 16384