Diffstat (limited to 'tools/perf/util/evlist.c')
-rw-r--r--  tools/perf/util/evlist.c  249
1 file changed, 188 insertions, 61 deletions
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index e584cd30b0f2..b939221efd8d 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -18,6 +18,7 @@
#include <unistd.h>
#include "parse-events.h"
+#include "parse-options.h"
#include <sys/mman.h>
@@ -49,6 +50,18 @@ struct perf_evlist *perf_evlist__new(void)
return evlist;
}
+struct perf_evlist *perf_evlist__new_default(void)
+{
+ struct perf_evlist *evlist = perf_evlist__new();
+
+ if (evlist && perf_evlist__add_default(evlist)) {
+ perf_evlist__delete(evlist);
+ evlist = NULL;
+ }
+
+ return evlist;
+}
+
/**
* perf_evlist__set_id_pos - set the positions of event ids.
* @evlist: selected event list
@@ -242,7 +255,7 @@ int perf_evlist__add_newtp(struct perf_evlist *evlist,
if (evsel == NULL)
return -1;
- evsel->handler.func = handler;
+ evsel->handler = handler;
perf_evlist__add(evlist, evsel);
return 0;
}
@@ -527,7 +540,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
if ((old & md->mask) + size != ((old + size) & md->mask)) {
unsigned int offset = old;
unsigned int len = min(sizeof(*event), size), cpy;
- void *dst = &md->event_copy;
+ void *dst = md->event_copy;
do {
cpy = min(md->mask + 1 - (offset & md->mask), len);
@@ -537,7 +550,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
len -= cpy;
} while (len);
- event = &md->event_copy;
+ event = (union perf_event *) md->event_copy;
}
old += size;
@@ -594,6 +607,8 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist,
evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
MAP_SHARED, fd, 0);
if (evlist->mmap[idx].base == MAP_FAILED) {
+ pr_debug2("failed to mmap perf event ring buffer, error %d\n",
+ errno);
evlist->mmap[idx].base = NULL;
return -1;
}
@@ -602,9 +617,36 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist,
return 0;
}
-static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
+static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
+ int prot, int mask, int cpu, int thread,
+ int *output)
{
struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ int fd = FD(evsel, cpu, thread);
+
+ if (*output == -1) {
+ *output = fd;
+ if (__perf_evlist__mmap(evlist, idx, prot, mask,
+ *output) < 0)
+ return -1;
+ } else {
+ if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
+ return -1;
+ }
+
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
+ int mask)
+{
int cpu, thread;
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads);
@@ -614,23 +656,9 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
int output = -1;
for (thread = 0; thread < nr_threads; thread++) {
- list_for_each_entry(evsel, &evlist->entries, node) {
- int fd = FD(evsel, cpu, thread);
-
- if (output == -1) {
- output = fd;
- if (__perf_evlist__mmap(evlist, cpu,
- prot, mask, output) < 0)
- goto out_unmap;
- } else {
- if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
- goto out_unmap;
- }
-
- if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
- perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
- goto out_unmap;
- }
+ if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
+ cpu, thread, &output))
+ goto out_unmap;
}
}
@@ -642,9 +670,9 @@ out_unmap:
return -1;
}
-static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
+ int mask)
{
- struct perf_evsel *evsel;
int thread;
int nr_threads = thread_map__nr(evlist->threads);
@@ -652,23 +680,9 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
for (thread = 0; thread < nr_threads; thread++) {
int output = -1;
- list_for_each_entry(evsel, &evlist->entries, node) {
- int fd = FD(evsel, 0, thread);
-
- if (output == -1) {
- output = fd;
- if (__perf_evlist__mmap(evlist, thread,
- prot, mask, output) < 0)
- goto out_unmap;
- } else {
- if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
- goto out_unmap;
- }
-
- if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
- perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
- goto out_unmap;
- }
+ if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
+ thread, &output))
+ goto out_unmap;
}
return 0;
@@ -679,20 +693,76 @@ out_unmap:
return -1;
}
-/** perf_evlist__mmap - Create per cpu maps to receive events
- *
- * @evlist - list of events
- * @pages - map length in pages
- * @overwrite - overwrite older events?
- *
- * If overwrite is false the user needs to signal event consuption using:
- *
- * struct perf_mmap *m = &evlist->mmap[cpu];
- * unsigned int head = perf_mmap__read_head(m);
+static size_t perf_evlist__mmap_size(unsigned long pages)
+{
+ /* 512 kiB: default amount of unprivileged mlocked memory */
+ if (pages == UINT_MAX)
+ pages = (512 * 1024) / page_size;
+ else if (!is_power_of_2(pages))
+ return 0;
+
+ return (pages + 1) * page_size;
+}
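(For example, with 4 KiB pages the UINT_MAX default above resolves to 128 data pages, and the extra page added here accounts for the ring buffer's control/header page, so the resulting map length is 129 pages, i.e. 516 KiB.)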
+
+int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ unsigned int *mmap_pages = opt->value;
+ unsigned long pages, val;
+ size_t size;
+ static struct parse_tag tags[] = {
+ { .tag = 'B', .mult = 1 },
+ { .tag = 'K', .mult = 1 << 10 },
+ { .tag = 'M', .mult = 1 << 20 },
+ { .tag = 'G', .mult = 1 << 30 },
+ { .tag = 0 },
+ };
+
+ val = parse_tag_value(str, tags);
+ if (val != (unsigned long) -1) {
+ /* we got file size value */
+ pages = PERF_ALIGN(val, page_size) / page_size;
+ if (pages < (1UL << 31) && !is_power_of_2(pages)) {
+ pages = next_pow2(pages);
+ pr_info("rounding mmap pages size to %lu (%lu pages)\n",
+ pages * page_size, pages);
+ }
+ } else {
+ /* we got pages count value */
+ char *eptr;
+ pages = strtoul(str, &eptr, 10);
+ if (*eptr != '\0') {
+ pr_err("failed to parse --mmap_pages/-m value\n");
+ return -1;
+ }
+ }
+
+ if (pages > UINT_MAX || pages > SIZE_MAX / page_size) {
+ pr_err("--mmap_pages/-m value too big\n");
+ return -1;
+ }
+
+ size = perf_evlist__mmap_size(pages);
+ if (!size) {
+ pr_err("--mmap_pages/-m value must be a power of two.");
+ return -1;
+ }
+
+ *mmap_pages = pages;
+ return 0;
+}
+
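For context, a sketch of how a builtin could wire this parser into its option table through parse-options; the variable name, option spelling and help string below are illustrative, not part of this patch:

	static unsigned int mmap_pages = UINT_MAX;	/* UINT_MAX: let perf pick the 512 KiB default */

	static const struct option record_options[] = {
		OPT_CALLBACK('m', "mmap-pages", &mmap_pages, "pages",
			     "number of mmap data pages",
			     perf_evlist__parse_mmap_pages),
		OPT_END()
	};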
+/**
+ * perf_evlist__mmap - Create mmaps to receive events.
+ * @evlist: list of events
+ * @pages: map length in pages
+ * @overwrite: overwrite older events?
*
- * perf_mmap__write_tail(m, head)
+ * If @overwrite is %false the user needs to signal event consumption using
+ * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
+ * automatically.
*
- * Using perf_evlist__read_on_cpu does this automatically.
+ * Return: %0 on success, negative error code otherwise.
*/
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
bool overwrite)
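A minimal consumer sketch for the non-overwrite case described in the comment above, assuming the evlist has already been opened; error handling, the surrounding poll loop and process_event() are placeholders:

	int i;

	if (perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
		return -1;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		/* perf_evlist__mmap_read() advances the tail, signalling consumption */
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
			process_event(event);	/* placeholder for per-event handling */
	}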
@@ -702,14 +772,6 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
const struct thread_map *threads = evlist->threads;
int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
- /* 512 kiB: default amount of unprivileged mlocked memory */
- if (pages == UINT_MAX)
- pages = (512 * 1024) / page_size;
- else if (!is_power_of_2(pages))
- return -EINVAL;
-
- mask = pages * page_size - 1;
-
if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
return -ENOMEM;
@@ -717,7 +779,9 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
return -ENOMEM;
evlist->overwrite = overwrite;
- evlist->mmap_len = (pages + 1) * page_size;
+ evlist->mmap_len = perf_evlist__mmap_size(pages);
+ pr_debug("mmap size %zuB\n", evlist->mmap_len);
+ mask = evlist->mmap_len - page_size - 1;
list_for_each_entry(evsel, &evlist->entries, node) {
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
@@ -1073,3 +1137,66 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
return printed + fprintf(fp, "\n");
}
+
+int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
+ int err, char *buf, size_t size)
+{
+ char sbuf[128];
+
+ switch (err) {
+ case ENOENT:
+ scnprintf(buf, size, "%s",
+ "Error:\tUnable to find debugfs\n"
+ "Hint:\tWas your kernel was compiled with debugfs support?\n"
+ "Hint:\tIs the debugfs filesystem mounted?\n"
+ "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
+ break;
+ case EACCES:
+ scnprintf(buf, size,
+ "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
+ "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
+ debugfs_mountpoint, debugfs_mountpoint);
+ break;
+ default:
+ scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
+ break;
+ }
+
+ return 0;
+}
+
+int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
+ int err, char *buf, size_t size)
+{
+ int printed, value;
+ char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+
+ switch (err) {
+ case EACCES:
+ case EPERM:
+ printed = scnprintf(buf, size,
+ "Error:\t%s.\n"
+ "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
+
+ if (filename__read_int("/proc/sys/kernel/perf_event_paranoid", &value))
+ break;
+
+ printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
+
+ if (value >= 2) {
+ printed += scnprintf(buf + printed, size - printed,
+ "For your workloads it needs to be <= 1\nHint:\t");
+ }
+ printed += scnprintf(buf + printed, size - printed,
+ "For system wide tracing it needs to be set to -1");
+
+ printed += scnprintf(buf + printed, size - printed,
+ ".\nHint:\tThe current value is %d.", value);
+ break;
+ default:
+ scnprintf(buf, size, "%s", emsg);
+ break;
+ }
+
+ return 0;
+}
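A hedged caller-side sketch showing how a tool could surface these hints after a failed open; the buffer size and cleanup label are illustrative:

	char errbuf[BUFSIZ];
	int err = perf_evlist__open(evlist);

	if (err < 0) {
		perf_evlist__strerror_open(evlist, -err, errbuf, sizeof(errbuf));
		pr_err("%s\n", errbuf);
		goto out_delete_evlist;	/* illustrative cleanup label */
	}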