From 7074674e7338863e6404909c9761d4d3a610a379 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 20 Nov 2019 16:15:13 -0800 Subject: perf cpumap: Maintain cpumaps ordered and without dups Enforce this in _trim() Needed for followon change. Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lore.kernel.org/lkml/20191121001522.180827-4-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/lib/cpumap.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'tools/perf') diff --git a/tools/perf/lib/cpumap.c b/tools/perf/lib/cpumap.c index 2ca1fafa620d..d81656b4635e 100644 --- a/tools/perf/lib/cpumap.c +++ b/tools/perf/lib/cpumap.c @@ -68,14 +68,28 @@ static struct perf_cpu_map *cpu_map__default_new(void) return cpus; } +static int cmp_int(const void *a, const void *b) +{ + return *(const int *)a - *(const int*)b; +} + static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus) { size_t payload_size = nr_cpus * sizeof(int); struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size); + int i, j; if (cpus != NULL) { - cpus->nr = nr_cpus; memcpy(cpus->map, tmp_cpus, payload_size); + qsort(cpus->map, nr_cpus, sizeof(int), cmp_int); + /* Remove dups */ + j = 0; + for (i = 0; i < nr_cpus; i++) { + if (i == 0 || cpus->map[i] != cpus->map[i - 1]) + cpus->map[j++] = cpus->map[i]; + } + cpus->nr = j; + assert(j <= nr_cpus); refcount_set(&cpus->refcnt, 1); } -- cgit v1.2.3 From a2408a70368ade9c99de27da78d49416313b8833 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 20 Nov 2019 16:15:14 -0800 Subject: perf evlist: Maintain evlist->all_cpus Maintain a cpumap in the evlist that is the union of all the cpus of the events. This needs a cpumap merge operation, which is added together with tests. v2: Add tests for cpu map merge Fix handling of duplicates Rename _update to _merge Factor out sorting. Fix handling of NULL maps in merge v3: Add comments and empty lines to _merge Committer testing: # perf test "Merge cpu map" 52: Merge cpu map : Ok # Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Tested-by: Arnaldo Carvalho de Melo Link: http://lore.kernel.org/lkml/20191121001522.180827-5-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/lib/cpumap.c | 57 ++++++++++++++++++++++++++++++++ tools/perf/lib/evlist.c | 1 + tools/perf/lib/include/internal/evlist.h | 1 + tools/perf/lib/include/perf/cpumap.h | 2 ++ tools/perf/tests/builtin-test.c | 5 +++ tools/perf/tests/cpumap.c | 16 +++++++++ tools/perf/tests/tests.h | 1 + 7 files changed, 83 insertions(+) (limited to 'tools/perf') diff --git a/tools/perf/lib/cpumap.c b/tools/perf/lib/cpumap.c index d81656b4635e..f93f4e703e4c 100644 --- a/tools/perf/lib/cpumap.c +++ b/tools/perf/lib/cpumap.c @@ -286,3 +286,60 @@ int perf_cpu_map__max(struct perf_cpu_map *map) return max; } + +/* + * Merge two cpumaps + * + * orig either gets freed and replaced with a new map, or reused + * with no reference count change (similar to "realloc") + * other has its reference count increased. 
+ */ + +struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig, + struct perf_cpu_map *other) +{ + int *tmp_cpus; + int tmp_len; + int i, j, k; + struct perf_cpu_map *merged; + + if (!orig && !other) + return NULL; + if (!orig) { + perf_cpu_map__get(other); + return other; + } + if (!other) + return orig; + if (orig->nr == other->nr && + !memcmp(orig->map, other->map, orig->nr * sizeof(int))) + return orig; + + tmp_len = orig->nr + other->nr; + tmp_cpus = malloc(tmp_len * sizeof(int)); + if (!tmp_cpus) + return NULL; + + /* Standard merge algorithm from wikipedia */ + i = j = k = 0; + while (i < orig->nr && j < other->nr) { + if (orig->map[i] <= other->map[j]) { + if (orig->map[i] == other->map[j]) + j++; + tmp_cpus[k++] = orig->map[i++]; + } else + tmp_cpus[k++] = other->map[j++]; + } + + while (i < orig->nr) + tmp_cpus[k++] = orig->map[i++]; + + while (j < other->nr) + tmp_cpus[k++] = other->map[j++]; + assert(k <= tmp_len); + + merged = cpu_map__trim_new(k, tmp_cpus); + free(tmp_cpus); + perf_cpu_map__put(orig); + return merged; +} diff --git a/tools/perf/lib/evlist.c b/tools/perf/lib/evlist.c index 205ddbb80bc1..ae9e65aa2491 100644 --- a/tools/perf/lib/evlist.c +++ b/tools/perf/lib/evlist.c @@ -54,6 +54,7 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, perf_thread_map__put(evsel->threads); evsel->threads = perf_thread_map__get(evlist->threads); + evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus); } static void perf_evlist__propagate_maps(struct perf_evlist *evlist) diff --git a/tools/perf/lib/include/internal/evlist.h b/tools/perf/lib/include/internal/evlist.h index a2fbccf1922f..74dc8c3f0b66 100644 --- a/tools/perf/lib/include/internal/evlist.h +++ b/tools/perf/lib/include/internal/evlist.h @@ -18,6 +18,7 @@ struct perf_evlist { int nr_entries; bool has_user_cpus; struct perf_cpu_map *cpus; + struct perf_cpu_map *all_cpus; struct perf_thread_map *threads; int nr_mmaps; size_t mmap_len; diff --git a/tools/perf/lib/include/perf/cpumap.h b/tools/perf/lib/include/perf/cpumap.h index ac9aa497f84a..6a17ad730cbc 100644 --- a/tools/perf/lib/include/perf/cpumap.h +++ b/tools/perf/lib/include/perf/cpumap.h @@ -12,6 +12,8 @@ LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void); LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list); LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file); LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map); +LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig, + struct perf_cpu_map *other); LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map); LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx); LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus); diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 7115aa32a51e..82d19a8fcac7 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -259,6 +259,11 @@ static struct test generic_tests[] = { .desc = "Print cpu map", .func = test__cpu_map_print, }, + { + .desc = "Merge cpu map", + .func = test__cpu_map_merge, + }, + { .desc = "Probe SDT events", .func = test__sdt_event, diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c index 8a0d236202b0..4ac56741ac5f 100644 --- a/tools/perf/tests/cpumap.c +++ b/tools/perf/tests/cpumap.c @@ -120,3 +120,19 @@ int test__cpu_map_print(struct test *test __maybe_unused, int subtest __maybe_un TEST_ASSERT_VAL("failed to 
convert map", cpu_map_print("1-10,12-20,22-30,32-40")); return 0; } + +int test__cpu_map_merge(struct test *test __maybe_unused, int subtest __maybe_unused) +{ + struct perf_cpu_map *a = perf_cpu_map__new("4,2,1"); + struct perf_cpu_map *b = perf_cpu_map__new("4,5,7"); + struct perf_cpu_map *c = perf_cpu_map__merge(a, b); + char buf[100]; + + TEST_ASSERT_VAL("failed to merge map: bad nr", c->nr == 5); + cpu_map__snprint(c, buf, sizeof(buf)); + TEST_ASSERT_VAL("failed to merge map: bad result", !strcmp(buf, "1-2,4-5,7")); + perf_cpu_map__put(a); + perf_cpu_map__put(b); + perf_cpu_map__put(c); + return 0; +} diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index 25aea387e2bf..4f9ae6af78ec 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -98,6 +98,7 @@ int test__event_update(struct test *test, int subtest); int test__event_times(struct test *test, int subtest); int test__backward_ring_buffer(struct test *test, int subtest); int test__cpu_map_print(struct test *test, int subtest); +int test__cpu_map_merge(struct test *test, int subtest); int test__sdt_event(struct test *test, int subtest); int test__is_printable_array(struct test *test, int subtest); int test__bitmap_print(struct test *test, int subtest); -- cgit v1.2.3 From a8cbe40fe9f4ba499cc60b8b6a6851c2c1963797 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 20 Nov 2019 16:15:15 -0800 Subject: perf evsel: Add iterator to iterate over events ordered by CPU Add some common code that is needed to iterate over all events in CPU order. Used in followon patches Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lore.kernel.org/lkml/20191121001522.180827-6-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cpumap.h | 1 + tools/perf/util/evlist.c | 32 ++++++++++++++++++++++++++++++++ tools/perf/util/evlist.h | 8 ++++++++ tools/perf/util/evsel.h | 1 + 4 files changed, 42 insertions(+) (limited to 'tools/perf') diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 57943f3685f8..3a442f021468 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -63,4 +63,5 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct perf_cpu_map **res, int cpu_map__cpu(struct perf_cpu_map *cpus, int idx); bool cpu_map__has(struct perf_cpu_map *cpus, int cpu); + #endif /* __PERF_CPUMAP_H */ diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index fdce590d2278..dae6e846b2f8 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -342,6 +342,38 @@ static int perf_evlist__nr_threads(struct evlist *evlist, return perf_thread_map__nr(evlist->core.threads); } +void evlist__cpu_iter_start(struct evlist *evlist) +{ + struct evsel *pos; + + /* + * Reset the per evsel cpu_iter. This is needed because + * each evsel's cpumap may have a different index space, + * and some operations need the index to modify + * the FD xyarray (e.g. 
open, close) + */ + evlist__for_each_entry(evlist, pos) + pos->cpu_iter = 0; +} + +bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu) +{ + if (ev->cpu_iter >= ev->core.cpus->nr) + return true; + if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu) + return true; + return false; +} + +bool evsel__cpu_iter_skip(struct evsel *ev, int cpu) +{ + if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) { + ev->cpu_iter++; + return false; + } + return true; +} + void evlist__disable(struct evlist *evlist) { struct evsel *pos; diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 3655b9ebb147..22e2f58eabea 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -334,9 +334,17 @@ void perf_evlist__to_front(struct evlist *evlist, #define evlist__for_each_entry_safe(evlist, tmp, evsel) \ __evlist__for_each_entry_safe(&(evlist)->core.entries, tmp, evsel) +#define evlist__for_each_cpu(evlist, index, cpu) \ + evlist__cpu_iter_start(evlist); \ + perf_cpu_map__for_each_cpu (cpu, index, (evlist)->core.all_cpus) + void perf_evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel); +void evlist__cpu_iter_start(struct evlist *evlist); +bool evsel__cpu_iter_skip(struct evsel *ev, int cpu); +bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu); + struct evsel * perf_evlist__find_evsel_by_str(struct evlist *evlist, const char *str); diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index ddc5ee6f6592..b10d5ba21966 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -95,6 +95,7 @@ struct evsel { bool collect_stat; bool weak_group; bool percore; + int cpu_iter; const char *pmu_name; struct { perf_evsel__sb_cb_t *cb; -- cgit v1.2.3 From 99d6141d677a8cd0b35390a29527c8def42538b1 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 20 Nov 2019 16:15:16 -0800 Subject: perf evsel: Add functions to close evsel on a CPU Refactor the existing all CPU function to use the per CPU close internally. Export APIs to close per CPU. 
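As a rough usage sketch at the libperf level (not part of the patch: perf_evsel__new() and the NULL cpu_list shorthand for "all online CPUs" are assumed from libperf, the event choice is arbitrary, and privilege handling is glossed over), the new export lets a caller tear down a single CPU's file descriptors while leaving the rest open:

	#include <linux/perf_event.h>
	#include <perf/cpumap.h>
	#include <perf/evsel.h>

	static void close_one_cpu_example(void)
	{
		struct perf_event_attr attr = {
			.type	= PERF_TYPE_SOFTWARE,
			.config	= PERF_COUNT_SW_CPU_CLOCK,
		};
		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);	/* NULL: all online CPUs (assumed) */
		struct perf_evsel *evsel = perf_evsel__new(&attr);	/* assumed libperf constructor */

		if (evsel && cpus && !perf_evsel__open(evsel, cpus, NULL)) {
			perf_evsel__close_cpu(evsel, 0);	/* new: close only cpu map index 0 */
			perf_evsel__close(evsel);		/* then close whatever is still open */
		}
		perf_evsel__delete(evsel);
		perf_cpu_map__put(cpus);
	}
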
Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lore.kernel.org/lkml/20191121001522.180827-7-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/lib/evsel.c | 27 +++++++++++++++++++++------ tools/perf/lib/include/perf/evsel.h | 1 + 2 files changed, 22 insertions(+), 6 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/lib/evsel.c b/tools/perf/lib/evsel.c index 5a89857b0381..ea775dacbd2d 100644 --- a/tools/perf/lib/evsel.c +++ b/tools/perf/lib/evsel.c @@ -114,16 +114,23 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, return err; } +static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu) +{ + int thread; + + for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) { + if (FD(evsel, cpu, thread) >= 0) + close(FD(evsel, cpu, thread)); + FD(evsel, cpu, thread) = -1; + } +} + void perf_evsel__close_fd(struct perf_evsel *evsel) { - int cpu, thread; + int cpu; for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) - for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) { - if (FD(evsel, cpu, thread) >= 0) - close(FD(evsel, cpu, thread)); - FD(evsel, cpu, thread) = -1; - } + perf_evsel__close_fd_cpu(evsel, cpu); } void perf_evsel__free_fd(struct perf_evsel *evsel) @@ -141,6 +148,14 @@ void perf_evsel__close(struct perf_evsel *evsel) perf_evsel__free_fd(evsel); } +void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu) +{ + if (evsel->fd == NULL) + return; + + perf_evsel__close_fd_cpu(evsel, cpu); +} + int perf_evsel__read_size(struct perf_evsel *evsel) { u64 read_format = evsel->attr.read_format; diff --git a/tools/perf/lib/include/perf/evsel.h b/tools/perf/lib/include/perf/evsel.h index 557f5815a9c9..e7add554f861 100644 --- a/tools/perf/lib/include/perf/evsel.h +++ b/tools/perf/lib/include/perf/evsel.h @@ -26,6 +26,7 @@ LIBPERF_API void perf_evsel__delete(struct perf_evsel *evsel); LIBPERF_API int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads); LIBPERF_API void perf_evsel__close(struct perf_evsel *evsel); +LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu); LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, struct perf_counts_values *count); LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel); -- cgit v1.2.3 From 7736627b865defff2430e95df235b4aa2450bc37 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 20 Nov 2019 16:15:17 -0800 Subject: perf stat: Use affinity for closing file descriptors Closing a perf fd can also trigger an IPI to the target CPU. Use the same affinity technique as we use for reading/enabling events to closing to optimize the CPU transitions. 
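In outline, the affinity pattern introduced here and reused by the later open/read/enable/disable patches looks as below. This is a condensed sketch, with error handling and the per-event work elided; the function name is made up for illustration, it assumes perf's internal headers (util/evlist.h, util/evsel.h, util/affinity.h), and the complete code is in the evlist__close() hunk below.

	static void evlist__do_per_cpu(struct evlist *evlist)
	{
		struct evsel *evsel;
		struct affinity affinity;
		int i, cpu;

		if (affinity__setup(&affinity) < 0)
			return;

		evlist__for_each_cpu(evlist, i, cpu) {		/* walks evlist->core.all_cpus, resets each evsel's cpu_iter */
			affinity__set(&affinity, cpu);		/* migrate to 'cpu' once, then do all work for it */
			evlist__for_each_entry(evlist, evsel) {
				if (evsel__cpu_iter_skip(evsel, cpu))	/* this evsel has no counter on 'cpu' */
					continue;
				/* the per-CPU syscall goes here, e.g.
				 * perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
				 */
			}
		}
		affinity__cleanup(&affinity);
	}
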
Before on a large test case with 94 CPUs: % time seconds usecs/call calls errors syscall ------ ----------- ----------- --------- --------- ---------------- 32.56 3.085463 50 61483 close After: 10.54 0.735704 11 61485 close Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lore.kernel.org/lkml/20191121001522.180827-8-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evlist.c | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index dae6e846b2f8..2e8d38a324be 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -18,6 +18,7 @@ #include "debug.h" #include "units.h" #include // page_size +#include "affinity.h" #include "../perf.h" #include "asm/bug.h" #include "bpf-event.h" @@ -1169,9 +1170,35 @@ void perf_evlist__set_selected(struct evlist *evlist, void evlist__close(struct evlist *evlist) { struct evsel *evsel; + struct affinity affinity; + int cpu, i; - evlist__for_each_entry_reverse(evlist, evsel) - evsel__close(evsel); + /* + * With perf record core.cpus is usually NULL. + * Use the old method to handle this for now. + */ + if (!evlist->core.cpus) { + evlist__for_each_entry_reverse(evlist, evsel) + evsel__close(evsel); + return; + } + + if (affinity__setup(&affinity) < 0) + return; + evlist__for_each_cpu(evlist, i, cpu) { + affinity__set(&affinity, cpu); + + evlist__for_each_entry_reverse(evlist, evsel) { + if (evsel__cpu_iter_skip(evsel, cpu)) + continue; + perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1); + } + } + affinity__cleanup(&affinity); + evlist__for_each_entry_reverse(evlist, evsel) { + perf_evsel__free_fd(&evsel->core); + perf_evsel__free_id(&evsel->core); + } } static int perf_evlist__create_syswide_maps(struct evlist *evlist) -- cgit v1.2.3 From e0e6a6ca3ac211cc07486330a2b89f41ea31b4dd Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 20 Nov 2019 16:15:18 -0800 Subject: perf stat: Factor out open error handling Factor out the open error handling into a separate function. This is useful for followon patches who need to duplicate this. No behavior change intended. Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lore.kernel.org/lkml/20191121001522.180827-9-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 100 +++++++++++++++++++++++++++------------------- 1 file changed, 60 insertions(+), 40 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 0a15253b438c..1d9d7161815e 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -420,6 +420,57 @@ static bool is_target_alive(struct target *_target, return false; } +enum counter_recovery { + COUNTER_SKIP, + COUNTER_RETRY, + COUNTER_FATAL, +}; + +static enum counter_recovery stat_handle_error(struct evsel *counter) +{ + char msg[BUFSIZ]; + /* + * PPC returns ENXIO for HW counters until 2.6.37 + * (behavior changed with commit b0a873e). 
+ */ + if (errno == EINVAL || errno == ENOSYS || + errno == ENOENT || errno == EOPNOTSUPP || + errno == ENXIO) { + if (verbose > 0) + ui__warning("%s event is not supported by the kernel.\n", + perf_evsel__name(counter)); + counter->supported = false; + + if ((counter->leader != counter) || + !(counter->leader->core.nr_members > 1)) + return COUNTER_SKIP; + } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { + if (verbose > 0) + ui__warning("%s\n", msg); + return COUNTER_RETRY; + } else if (target__has_per_thread(&target) && + evsel_list->core.threads && + evsel_list->core.threads->err_thread != -1) { + /* + * For global --per-thread case, skip current + * error thread. + */ + if (!thread_map__remove(evsel_list->core.threads, + evsel_list->core.threads->err_thread)) { + evsel_list->core.threads->err_thread = -1; + return COUNTER_RETRY; + } + } + + perf_evsel__open_strerror(counter, &target, + errno, msg, sizeof(msg)); + ui__error("%s\n", msg); + + if (child_pid != -1) + kill(child_pid, SIGTERM); + return COUNTER_FATAL; +} + static int __run_perf_stat(int argc, const char **argv, int run_idx) { int interval = stat_config.interval; @@ -469,47 +520,16 @@ try_again: goto try_again; } - /* - * PPC returns ENXIO for HW counters until 2.6.37 - * (behavior changed with commit b0a873e). - */ - if (errno == EINVAL || errno == ENOSYS || - errno == ENOENT || errno == EOPNOTSUPP || - errno == ENXIO) { - if (verbose > 0) - ui__warning("%s event is not supported by the kernel.\n", - perf_evsel__name(counter)); - counter->supported = false; - - if ((counter->leader != counter) || - !(counter->leader->core.nr_members > 1)) - continue; - } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { - if (verbose > 0) - ui__warning("%s\n", msg); - goto try_again; - } else if (target__has_per_thread(&target) && - evsel_list->core.threads && - evsel_list->core.threads->err_thread != -1) { - /* - * For global --per-thread case, skip current - * error thread. - */ - if (!thread_map__remove(evsel_list->core.threads, - evsel_list->core.threads->err_thread)) { - evsel_list->core.threads->err_thread = -1; - goto try_again; - } + switch (stat_handle_error(counter)) { + case COUNTER_FATAL: + return -1; + case COUNTER_RETRY: + goto try_again; + case COUNTER_SKIP: + continue; + default: + break; } - - perf_evsel__open_strerror(counter, &target, - errno, msg, sizeof(msg)); - ui__error("%s\n", msg); - - if (child_pid != -1) - kill(child_pid, SIGTERM); - - return -1; } counter->supported = true; -- cgit v1.2.3 From 4804e0111662d7d89edf4e767a64c6f7e4778bb1 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 20 Nov 2019 16:15:19 -0800 Subject: perf stat: Use affinity for opening events Restructure the event opening in perf stat to cycle through the events by CPU after setting affinity to that CPU. This eliminates IPI overhead in the perf API. We have to loop through the CPU in the outter builtin-stat code instead of leaving that to low level functions. It has to change the weak group fallback strategy slightly. Since we cannot easily undo the opens for other CPUs move the weak group retry to a separate loop. 
Before with a large test case with 94 CPUs: % time seconds usecs/call calls errors syscall ------ ----------- ----------- --------- --------- ---------------- 42.75 4.050910 67 60046 110 perf_event_open After: 26.86 0.944396 16 58069 110 perf_event_open (the number changes slightly because the weak group retries work differently and the test case relies on weak groups) Committer notes: Added one of the hunks in a patch provided by Andi after I noticed that the "event times" 'perf test' entry was segfaulting. Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lore.kernel.org/lkml/20191121001522.180827-10-andi@firstfloor.org Link: http://lore.kernel.org/lkml/20191127232657.GL84886@tassilo.jf.intel.com # Fix Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 2 +- tools/perf/builtin-stat.c | 121 ++++++++++++++++++++++++++++++++++------- tools/perf/tests/event-times.c | 4 +- tools/perf/util/evlist.c | 10 +++- tools/perf/util/evlist.h | 3 +- tools/perf/util/evsel.c | 22 ++++++-- tools/perf/util/evsel.h | 5 +- tools/perf/util/stat.c | 5 +- tools/perf/util/stat.h | 3 +- 9 files changed, 141 insertions(+), 34 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index b5063d3b6fd0..fb19ef63cc35 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -832,7 +832,7 @@ try_again: if ((errno == EINVAL || errno == EBADF) && pos->leader != pos && pos->weak_group) { - pos = perf_evlist__reset_weak_group(evlist, pos); + pos = perf_evlist__reset_weak_group(evlist, pos, true); goto try_again; } rc = -errno; diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 1d9d7161815e..cf8516e701e2 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -65,6 +65,7 @@ #include "util/target.h" #include "util/time-utils.h" #include "util/top.h" +#include "util/affinity.h" #include "asm/bug.h" #include @@ -440,6 +441,11 @@ static enum counter_recovery stat_handle_error(struct evsel *counter) ui__warning("%s event is not supported by the kernel.\n", perf_evsel__name(counter)); counter->supported = false; + /* + * errored is a sticky flag that means one of the counter's + * cpu event had a problem and needs to be reexamined. + */ + counter->errored = true; if ((counter->leader != counter) || !(counter->leader->core.nr_members > 1)) @@ -484,6 +490,9 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx) int status = 0; const bool forks = (argc > 0); bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false; + struct affinity affinity; + int i, cpu; + bool second_pass = false; if (interval) { ts.tv_sec = interval / USEC_PER_MSEC; @@ -508,30 +517,104 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx) if (group) perf_evlist__set_leader(evsel_list); - evlist__for_each_entry(evsel_list, counter) { + if (affinity__setup(&affinity) < 0) + return -1; + + evlist__for_each_cpu (evsel_list, i, cpu) { + affinity__set(&affinity, cpu); + + evlist__for_each_entry(evsel_list, counter) { + if (evsel__cpu_iter_skip(counter, cpu)) + continue; + if (counter->reset_group || counter->errored) + continue; try_again: - if (create_perf_stat_counter(counter, &stat_config, &target) < 0) { - - /* Weak group failed. Reset the group. 
*/ - if ((errno == EINVAL || errno == EBADF) && - counter->leader != counter && - counter->weak_group) { - counter = perf_evlist__reset_weak_group(evsel_list, counter); - goto try_again; + if (create_perf_stat_counter(counter, &stat_config, &target, + counter->cpu_iter - 1) < 0) { + + /* + * Weak group failed. We cannot just undo this here + * because earlier CPUs might be in group mode, and the kernel + * doesn't support mixing group and non group reads. Defer + * it to later. + * Don't close here because we're in the wrong affinity. + */ + if ((errno == EINVAL || errno == EBADF) && + counter->leader != counter && + counter->weak_group) { + perf_evlist__reset_weak_group(evsel_list, counter, false); + assert(counter->reset_group); + second_pass = true; + continue; + } + + switch (stat_handle_error(counter)) { + case COUNTER_FATAL: + return -1; + case COUNTER_RETRY: + goto try_again; + case COUNTER_SKIP: + continue; + default: + break; + } + } + counter->supported = true; + } + } - switch (stat_handle_error(counter)) { - case COUNTER_FATAL: - return -1; - case COUNTER_RETRY: - goto try_again; - case COUNTER_SKIP: - continue; - default: - break; + if (second_pass) { + /* + * Now redo all the weak group after closing them, + * and also close errored counters. + */ + + evlist__for_each_cpu(evsel_list, i, cpu) { + affinity__set(&affinity, cpu); + /* First close errored or weak retry */ + evlist__for_each_entry(evsel_list, counter) { + if (!counter->reset_group && !counter->errored) + continue; + if (evsel__cpu_iter_skip_no_inc(counter, cpu)) + continue; + perf_evsel__close_cpu(&counter->core, counter->cpu_iter); + } + /* Now reopen weak */ + evlist__for_each_entry(evsel_list, counter) { + if (!counter->reset_group && !counter->errored) + continue; + if (evsel__cpu_iter_skip(counter, cpu)) + continue; + if (!counter->reset_group) + continue; +try_again_reset: + pr_debug2("reopening weak %s\n", perf_evsel__name(counter)); + if (create_perf_stat_counter(counter, &stat_config, &target, + counter->cpu_iter - 1) < 0) { + + switch (stat_handle_error(counter)) { + case COUNTER_FATAL: + return -1; + case COUNTER_RETRY: + goto try_again_reset; + case COUNTER_SKIP: + continue; + default: + break; + } + } + counter->supported = true; } } - counter->supported = true; + } + affinity__cleanup(&affinity); + + evlist__for_each_entry(evsel_list, counter) { + if (!counter->supported) { + perf_evsel__free_fd(&counter->core); + continue; + } l = strlen(counter->unit); if (l > stat_config.unit_width) diff --git a/tools/perf/tests/event-times.c b/tools/perf/tests/event-times.c index 1ee8704e2284..1e8a9f5c356d 100644 --- a/tools/perf/tests/event-times.c +++ b/tools/perf/tests/event-times.c @@ -125,7 +125,7 @@ static int attach__cpu_disabled(struct evlist *evlist) evsel->core.attr.disabled = 1; - err = perf_evsel__open_per_cpu(evsel, cpus); + err = perf_evsel__open_per_cpu(evsel, cpus, -1); if (err) { if (err == -EACCES) return TEST_SKIP; @@ -152,7 +152,7 @@ static int attach__cpu_enabled(struct evlist *evlist) return -1; } - err = perf_evsel__open_per_cpu(evsel, cpus); + err = perf_evsel__open_per_cpu(evsel, cpus, -1); if (err == -EACCES) return TEST_SKIP; diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 2e8d38a324be..096a4ea65b1b 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -1636,7 +1636,8 @@ void perf_evlist__force_leader(struct evlist *evlist) } struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list, - struct evsel *evsel) + struct evsel *evsel, + bool 
close) { struct evsel *c2, *leader; bool is_open = true; @@ -1653,10 +1654,15 @@ struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list, if (c2 == evsel) is_open = false; if (c2->leader == leader) { - if (is_open) + if (is_open && close) perf_evsel__close(&c2->core); c2->leader = c2; c2->core.nr_members = 0; + /* + * Set this for all former members of the group + * to indicate they get reopened. + */ + c2->reset_group = true; } } return leader; diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 22e2f58eabea..f5bd5c386df1 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -356,5 +356,6 @@ bool perf_evlist__exclude_kernel(struct evlist *evlist); void perf_evlist__force_leader(struct evlist *evlist); struct evsel *perf_evlist__reset_weak_group(struct evlist *evlist, - struct evsel *evsel); + struct evsel *evsel, + bool close); #endif /* __PERF_EVLIST_H */ diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index f4dea055b080..aa180d1df50f 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1587,8 +1587,9 @@ static int perf_event_open(struct evsel *evsel, return fd; } -int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, - struct perf_thread_map *threads) +static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, + struct perf_thread_map *threads, + int start_cpu, int end_cpu) { int cpu, thread, nthreads; unsigned long flags = PERF_FLAG_FD_CLOEXEC; @@ -1665,7 +1666,7 @@ retry_sample_id: display_attr(&evsel->core.attr); - for (cpu = 0; cpu < cpus->nr; cpu++) { + for (cpu = start_cpu; cpu < end_cpu; cpu++) { for (thread = 0; thread < nthreads; thread++) { int fd, group_fd; @@ -1843,6 +1844,12 @@ out_close: return err; } +int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, + struct perf_thread_map *threads) +{ + return evsel__open_cpu(evsel, cpus, threads, 0, cpus ? cpus->nr : 1); +} + void evsel__close(struct evsel *evsel) { perf_evsel__close(&evsel->core); @@ -1850,9 +1857,14 @@ void evsel__close(struct evsel *evsel) } int perf_evsel__open_per_cpu(struct evsel *evsel, - struct perf_cpu_map *cpus) + struct perf_cpu_map *cpus, + int cpu) { - return evsel__open(evsel, cpus, NULL); + if (cpu == -1) + return evsel__open_cpu(evsel, cpus, NULL, 0, + cpus ? 
cpus->nr : 1); + + return evsel__open_cpu(evsel, cpus, NULL, cpu, cpu + 1); } int perf_evsel__open_per_thread(struct evsel *evsel, diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index b10d5ba21966..ca82a93960cd 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -94,6 +94,8 @@ struct evsel { struct evsel *metric_leader; bool collect_stat; bool weak_group; + bool reset_group; + bool errored; bool percore; int cpu_iter; const char *pmu_name; @@ -223,7 +225,8 @@ int evsel__enable(struct evsel *evsel); int evsel__disable(struct evsel *evsel); int perf_evsel__open_per_cpu(struct evsel *evsel, - struct perf_cpu_map *cpus); + struct perf_cpu_map *cpus, + int cpu); int perf_evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads); int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 332cb730785b..5f26137b8d60 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -464,7 +464,8 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp) int create_perf_stat_counter(struct evsel *evsel, struct perf_stat_config *config, - struct target *target) + struct target *target, + int cpu) { struct perf_event_attr *attr = &evsel->core.attr; struct evsel *leader = evsel->leader; @@ -518,7 +519,7 @@ int create_perf_stat_counter(struct evsel *evsel, } if (target__has_cpu(target) && !target__has_per_thread(target)) - return perf_evsel__open_per_cpu(evsel, evsel__cpus(evsel)); + return perf_evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu); return perf_evsel__open_per_thread(evsel, evsel->core.threads); } diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index bfa9aaf36ce6..fb990efa54a8 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -214,7 +214,8 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp); int create_perf_stat_counter(struct evsel *evsel, struct perf_stat_config *config, - struct target *target); + struct target *target, + int cpu); void perf_evlist__print_counters(struct evlist *evlist, struct perf_stat_config *config, -- cgit v1.2.3 From 4b49ab708d1804bc8b2fcdde79844b8bc98f7ef6 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 20 Nov 2019 16:15:20 -0800 Subject: perf stat: Use affinity for reading Restructure event reading to use affinity to minimize the number of IPIs needed. 
Before on a large test case with 94 CPUs: % time seconds usecs/call calls errors syscall ------ ----------- ----------- --------- --------- ---------------- 3.16 0.106079 4 22082 read After: 3.43 0.081295 3 22082 read Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lore.kernel.org/lkml/20191121001522.180827-11-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 97 +++++++++++++++++++++++++++-------------------- tools/perf/util/evsel.h | 1 + 2 files changed, 57 insertions(+), 41 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index cf8516e701e2..a098c2ebf4ea 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -266,15 +266,10 @@ static int read_single_counter(struct evsel *counter, int cpu, * Read out the results of a single counter: * do not aggregate counts across CPUs in system-wide mode */ -static int read_counter(struct evsel *counter, struct timespec *rs) +static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) { int nthreads = perf_thread_map__nr(evsel_list->core.threads); - int ncpus, cpu, thread; - - if (target__has_cpu(&target) && !target__has_per_thread(&target)) - ncpus = perf_evsel__nr_cpus(counter); - else - ncpus = 1; + int thread; if (!counter->supported) return -ENOENT; @@ -283,40 +278,38 @@ static int read_counter(struct evsel *counter, struct timespec *rs) nthreads = 1; for (thread = 0; thread < nthreads; thread++) { - for (cpu = 0; cpu < ncpus; cpu++) { - struct perf_counts_values *count; - - count = perf_counts(counter->counts, cpu, thread); - - /* - * The leader's group read loads data into its group members - * (via perf_evsel__read_counter) and sets threir count->loaded. - */ - if (!perf_counts__is_loaded(counter->counts, cpu, thread) && - read_single_counter(counter, cpu, thread, rs)) { - counter->counts->scaled = -1; - perf_counts(counter->counts, cpu, thread)->ena = 0; - perf_counts(counter->counts, cpu, thread)->run = 0; - return -1; - } + struct perf_counts_values *count; - perf_counts__set_loaded(counter->counts, cpu, thread, false); + count = perf_counts(counter->counts, cpu, thread); - if (STAT_RECORD) { - if (perf_evsel__write_stat_event(counter, cpu, thread, count)) { - pr_err("failed to write stat event\n"); - return -1; - } - } + /* + * The leader's group read loads data into its group members + * (via perf_evsel__read_counter()) and sets their count->loaded. 
+ */ + if (!perf_counts__is_loaded(counter->counts, cpu, thread) && + read_single_counter(counter, cpu, thread, rs)) { + counter->counts->scaled = -1; + perf_counts(counter->counts, cpu, thread)->ena = 0; + perf_counts(counter->counts, cpu, thread)->run = 0; + return -1; + } + + perf_counts__set_loaded(counter->counts, cpu, thread, false); - if (verbose > 1) { - fprintf(stat_config.output, - "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", - perf_evsel__name(counter), - cpu, - count->val, count->ena, count->run); + if (STAT_RECORD) { + if (perf_evsel__write_stat_event(counter, cpu, thread, count)) { + pr_err("failed to write stat event\n"); + return -1; } } + + if (verbose > 1) { + fprintf(stat_config.output, + "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", + perf_evsel__name(counter), + cpu, + count->val, count->ena, count->run); + } } return 0; @@ -325,15 +318,37 @@ static int read_counter(struct evsel *counter, struct timespec *rs) static void read_counters(struct timespec *rs) { struct evsel *counter; - int ret; + struct affinity affinity; + int i, ncpus, cpu; + + if (affinity__setup(&affinity) < 0) + return; + + ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus); + if (!target__has_cpu(&target) || target__has_per_thread(&target)) + ncpus = 1; + evlist__for_each_cpu(evsel_list, i, cpu) { + if (i >= ncpus) + break; + affinity__set(&affinity, cpu); + + evlist__for_each_entry(evsel_list, counter) { + if (evsel__cpu_iter_skip(counter, cpu)) + continue; + if (!counter->err) { + counter->err = read_counter_cpu(counter, rs, + counter->cpu_iter - 1); + } + } + } + affinity__cleanup(&affinity); evlist__for_each_entry(evsel_list, counter) { - ret = read_counter(counter, rs); - if (ret) + if (counter->err) pr_debug("failed to read counter %s\n", counter->name); - - if (ret == 0 && perf_stat_process_counter(&stat_config, counter)) + if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) pr_warning("failed to process counter %s\n", counter->name); + counter->err = 0; } } diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index ca82a93960cd..c8af4bc23f8f 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -86,6 +86,7 @@ struct evsel { struct list_head config_terms; struct bpf_object *bpf_obj; int bpf_fd; + int err; bool auto_merge_stats; bool merged_stat; const char * metric_expr; -- cgit v1.2.3 From 363fb12189d58ebc60456561b8540d68013782a6 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 20 Nov 2019 16:15:21 -0800 Subject: perf evsel: Add functions to enable/disable for a specific CPU Refactor the existing functions to use these functions internally. Used in the next patch. 
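As a rough sketch of how the pieces relate (not part of the patch: the wrapper name is made up, and it assumes the evsel has already been opened so its FD array has one column per entry of its cpu map), the all-CPU calls now decompose into the per-CPU ones, and the cpu argument is an index into the evsel's cpu map rather than a logical CPU number:

	#include <perf/cpumap.h>
	#include <perf/evsel.h>

	static int enable_all_cpus(struct perf_evsel *evsel)
	{
		struct perf_cpu_map *cpus = perf_evsel__cpus(evsel);
		int i, err = 0;

		/* roughly what perf_evsel__enable() now does internally */
		for (i = 0; i < perf_cpu_map__nr(cpus) && !err; i++)
			err = perf_evsel__enable_cpu(evsel, i);	/* i indexes the evsel's cpu map */
		return err;
	}
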
Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lore.kernel.org/lkml/20191121001522.180827-12-andi@firstfloor.org Link: http://lore.kernel.org/lkml/20191127232657.GL84886@tassilo.jf.intel.com # Fix Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/lib/evsel.c | 49 +++++++++++++++++++++++++++---------- tools/perf/lib/include/perf/evsel.h | 2 ++ tools/perf/util/evsel.c | 13 +++++++++- tools/perf/util/evsel.h | 2 ++ 4 files changed, 52 insertions(+), 14 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/lib/evsel.c b/tools/perf/lib/evsel.c index ea775dacbd2d..4dc06289f4c7 100644 --- a/tools/perf/lib/evsel.c +++ b/tools/perf/lib/evsel.c @@ -198,38 +198,61 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, } static int perf_evsel__run_ioctl(struct perf_evsel *evsel, - int ioc, void *arg) + int ioc, void *arg, + int cpu) { - int cpu, thread; + int thread; - for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) { - for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { - int fd = FD(evsel, cpu, thread), - err = ioctl(fd, ioc, arg); + for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { + int fd = FD(evsel, cpu, thread), + err = ioctl(fd, ioc, arg); - if (err) - return err; - } + if (err) + return err; } return 0; } +int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu) +{ + return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu); +} + int perf_evsel__enable(struct perf_evsel *evsel) { - return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, 0); + int i; + int err = 0; + + for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++) + err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i); + return err; +} + +int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu) +{ + return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu); } int perf_evsel__disable(struct perf_evsel *evsel) { - return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, 0); + int i; + int err = 0; + + for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++) + err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i); + return err; } int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter) { - return perf_evsel__run_ioctl(evsel, + int err = 0, i; + + for (i = 0; i < evsel->cpus->nr && !err; i++) + err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_SET_FILTER, - (void *)filter); + (void *)filter, i); + return err; } struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) diff --git a/tools/perf/lib/include/perf/evsel.h b/tools/perf/lib/include/perf/evsel.h index e7add554f861..c82ec39a4ad0 100644 --- a/tools/perf/lib/include/perf/evsel.h +++ b/tools/perf/lib/include/perf/evsel.h @@ -30,7 +30,9 @@ LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu); LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, struct perf_counts_values *count); LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel); +LIBPERF_API int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu); LIBPERF_API int perf_evsel__disable(struct perf_evsel *evsel); +LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu); LIBPERF_API struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel); LIBPERF_API struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel); LIBPERF_API struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 
aa180d1df50f..a69e64236120 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1223,16 +1223,27 @@ int perf_evsel__append_addr_filter(struct evsel *evsel, const char *filter) return perf_evsel__append_filter(evsel, "%s,%s", filter); } +/* Caller has to clear disabled after going through all CPUs. */ +int evsel__enable_cpu(struct evsel *evsel, int cpu) +{ + return perf_evsel__enable_cpu(&evsel->core, cpu); +} + int evsel__enable(struct evsel *evsel) { int err = perf_evsel__enable(&evsel->core); if (!err) evsel->disabled = false; - return err; } +/* Caller has to set disabled after going through all CPUs. */ +int evsel__disable_cpu(struct evsel *evsel, int cpu) +{ + return perf_evsel__disable_cpu(&evsel->core, cpu); +} + int evsel__disable(struct evsel *evsel) { int err = perf_evsel__disable(&evsel->core); diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index c8af4bc23f8f..dc14f4a823cd 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -222,8 +222,10 @@ int perf_evsel__set_filter(struct evsel *evsel, const char *filter); int perf_evsel__append_tp_filter(struct evsel *evsel, const char *filter); int perf_evsel__append_addr_filter(struct evsel *evsel, const char *filter); +int evsel__enable_cpu(struct evsel *evsel, int cpu); int evsel__enable(struct evsel *evsel); int evsel__disable(struct evsel *evsel); +int evsel__disable_cpu(struct evsel *evsel, int cpu); int perf_evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, -- cgit v1.2.3 From 704e2f5b700da4c912635cf161c3e982737eb89e Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 20 Nov 2019 16:15:22 -0800 Subject: perf stat: Use affinity for enabling/disabling events Restructure event enabling/disabling to use affinity, which minimizes the number of IPIs needed. 
Before on a large test case with 94 CPUs: % time seconds usecs/call calls errors syscall ------ ----------- ----------- --------- --------- ---------------- 54.65 1.899986 22 84812 660 ioctl after: 39.21 0.930451 10 84796 644 ioctl Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lore.kernel.org/lkml/20191121001522.180827-13-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evlist.c | 40 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 096a4ea65b1b..1548237b6558 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -378,11 +378,28 @@ bool evsel__cpu_iter_skip(struct evsel *ev, int cpu) void evlist__disable(struct evlist *evlist) { struct evsel *pos; + struct affinity affinity; + int cpu, i; + + if (affinity__setup(&affinity) < 0) + return; + + evlist__for_each_cpu(evlist, i, cpu) { + affinity__set(&affinity, cpu); + evlist__for_each_entry(evlist, pos) { + if (evsel__cpu_iter_skip(pos, cpu)) + continue; + if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd) + continue; + evsel__disable_cpu(pos, pos->cpu_iter - 1); + } + } + affinity__cleanup(&affinity); evlist__for_each_entry(evlist, pos) { - if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd) + if (!perf_evsel__is_group_leader(pos) || !pos->core.fd) continue; - evsel__disable(pos); + pos->disabled = true; } evlist->enabled = false; @@ -391,11 +408,28 @@ void evlist__disable(struct evlist *evlist) void evlist__enable(struct evlist *evlist) { struct evsel *pos; + struct affinity affinity; + int cpu, i; + if (affinity__setup(&affinity) < 0) + return; + + evlist__for_each_cpu(evlist, i, cpu) { + affinity__set(&affinity, cpu); + + evlist__for_each_entry(evlist, pos) { + if (evsel__cpu_iter_skip(pos, cpu)) + continue; + if (!perf_evsel__is_group_leader(pos) || !pos->core.fd) + continue; + evsel__enable_cpu(pos, pos->cpu_iter - 1); + } + } + affinity__cleanup(&affinity); evlist__for_each_entry(evlist, pos) { if (!perf_evsel__is_group_leader(pos) || !pos->core.fd) continue; - evsel__enable(pos); + pos->disabled = false; } evlist->enabled = true; -- cgit v1.2.3 From fa7f7e7354957422b43ea950b672d3e731f27e68 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 26 Nov 2019 15:59:13 -0800 Subject: perf jit: Move test functionality in to a test Adds a test for minimal jit_write_elf functionality. Committer testing: # perf test jit 61: Test jit_write_elf : Ok # # perf test -v jit 61: Test jit_write_elf : --- start --- test child forked, pid 10460 Writing jit code to: /tmp/perf-test-KqxURR test child finished with 0 ---- end ---- Test jit_write_elf: Ok # Committer notes: Fix up the case where HAVE_JITDUMP is no defined. 
Signed-off-by: Ian Rogers Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexios Zavras Cc: Allison Randal Cc: Florian Fainelli Cc: Greg Kroah-Hartman Cc: Jiri Olsa Cc: Kate Stewart Cc: Leo Yan Cc: Mark Rutland Cc: Michael Petlan Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Song Liu Cc: Stephane Eranian Cc: Thomas Gleixner Link: http://lore.kernel.org/lkml/20191126235913.41855-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/Build | 1 + tools/perf/tests/builtin-test.c | 4 ++++ tools/perf/tests/genelf.c | 51 +++++++++++++++++++++++++++++++++++++++++ tools/perf/tests/tests.h | 1 + tools/perf/util/genelf.c | 46 ------------------------------------- 5 files changed, 57 insertions(+), 46 deletions(-) create mode 100644 tools/perf/tests/genelf.c (limited to 'tools/perf') diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build index a3c595fba943..1692529639b0 100644 --- a/tools/perf/tests/Build +++ b/tools/perf/tests/Build @@ -54,6 +54,7 @@ perf-y += unit_number__scnprintf.o perf-y += mem2node.o perf-y += maps.o perf-y += time-utils-test.o +perf-y += genelf.o $(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build $(call rule_mkdir) diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 82d19a8fcac7..5f05db75cdd8 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -301,6 +301,10 @@ static struct test generic_tests[] = { .desc = "time utils", .func = test__time_utils, }, + { + .desc = "Test jit_write_elf", + .func = test__jit_write_elf, + }, { .desc = "maps__merge_in", .func = test__maps__merge_in, diff --git a/tools/perf/tests/genelf.c b/tools/perf/tests/genelf.c new file mode 100644 index 000000000000..f797f9823e89 --- /dev/null +++ b/tools/perf/tests/genelf.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include + +#include "debug.h" +#include "tests.h" + +#ifdef HAVE_JITDUMP +#include +#include "../util/genelf.h" +#endif + +#define TEMPL "/tmp/perf-test-XXXXXX" + +int test__jit_write_elf(struct test *test __maybe_unused, + int subtest __maybe_unused) +{ +#ifdef HAVE_JITDUMP + static unsigned char x86_code[] = { + 0xBB, 0x2A, 0x00, 0x00, 0x00, /* movl $42, %ebx */ + 0xB8, 0x01, 0x00, 0x00, 0x00, /* movl $1, %eax */ + 0xCD, 0x80 /* int $0x80 */ + }; + char path[PATH_MAX]; + int fd, ret; + + strcpy(path, TEMPL); + + fd = mkstemp(path); + if (fd < 0) { + perror("mkstemp failed"); + return TEST_FAIL; + } + + pr_info("Writing jit code to: %s\n", path); + + ret = jit_write_elf(fd, 0, "main", x86_code, sizeof(x86_code), + NULL, 0, NULL, 0, 0); + close(fd); + + unlink(path); + + return ret ? 
TEST_FAIL : 0; +#else + return TEST_SKIP; +#endif +} diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index 4f9ae6af78ec..9a160fef47c9 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -110,6 +110,7 @@ int test__unit_number__scnprint(struct test *test, int subtest); int test__mem2node(struct test *t, int subtest); int test__maps__merge_in(struct test *t, int subtest); int test__time_utils(struct test *t, int subtest); +int test__jit_write_elf(struct test *test, int subtest); bool test__bp_signal_is_supported(void); bool test__bp_account_is_supported(void); diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c index f9f18b8b1df9..aed49806a09b 100644 --- a/tools/perf/util/genelf.c +++ b/tools/perf/util/genelf.c @@ -8,15 +8,12 @@ */ #include -#include -#include #include #include #include #include #include #include -#include #include #include #ifdef HAVE_DWARF_SUPPORT @@ -31,8 +28,6 @@ #define NT_GNU_BUILD_ID 3 #endif -#define JVMTI - #define BUILD_ID_URANDOM /* different uuid for each run */ #ifdef HAVE_LIBCRYPTO @@ -511,44 +506,3 @@ error: return retval; } - -#ifndef JVMTI - -static unsigned char x86_code[] = { - 0xBB, 0x2A, 0x00, 0x00, 0x00, /* movl $42, %ebx */ - 0xB8, 0x01, 0x00, 0x00, 0x00, /* movl $1, %eax */ - 0xCD, 0x80 /* int $0x80 */ -}; - -static struct options options; - -int main(int argc, char **argv) -{ - int c, fd, ret; - - while ((c = getopt(argc, argv, "o:h")) != -1) { - switch (c) { - case 'o': - options.output = optarg; - break; - case 'h': - printf("Usage: genelf -o output_file [-h]\n"); - return 0; - default: - errx(1, "unknown option"); - } - } - - fd = open(options.output, O_CREAT|O_TRUNC|O_RDWR, 0666); - if (fd == -1) - err(1, "cannot create file %s", options.output); - - ret = jit_write_elf(fd, "main", x86_code, sizeof(x86_code)); - close(fd); - - if (ret != 0) - unlink(options.output); - - return ret; -} -#endif -- cgit v1.2.3 From 77b91c1a525d84cb560a4baef6f5f548b5c23f80 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 29 Nov 2019 15:47:51 -0300 Subject: perf machine: Fill map_symbol->maps in append_inlines() to fix segfault I forgot to fill in the map_symbol->maps field in append_inlines() which then makes code down the line segfault when trying to deref it. It doesn't make any sense to have an addr_location with its 'map' member not NULL while its 'maps' is NULL, after all al->maps is where al->map is in. It is done that way so that we don't have to have in each 'struct map' a pointer to the 'struct maps' it is in, as we had in the past when we would have 'map->mg', before 'struct maps' was combined with 'struct map_groups', because there was always a one-to-one relationship for these structs. This fixes a segfault when processing DWARF callgraphs in 'perf report'. 
Reported-by: Jiri Olsa Cc: Adrian Hunter Cc: Namhyung Kim Fixes: 08f6680e627e ("perf tools: Add a 'struct map_groups' pointer to 'struct map_symbol'") Link: http://lore.kernel.org/lkml/20191129160631.GD26963@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/machine.c | 1 + 1 file changed, 1 insertion(+) (limited to 'tools/perf') diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 416d174d223c..c8c5410315e8 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -2446,6 +2446,7 @@ static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms list_for_each_entry(ilist, &inline_node->val, list) { struct map_symbol ilist_ms = { + .maps = ms->maps, .map = map, .sym = ilist->symbol, }; -- cgit v1.2.3 From bd5c6b81dd6025bd4c6ca7800a580b217d9899b9 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 2 Dec 2019 11:40:57 -0300 Subject: perf bench: Update the copies of x86's mem{cpy,set}_64.S And update linux/linkage.h, which requires in turn that we make these files switch from ENTRY()/ENDPROC() to SYM_FUNC_START()/SYM_FUNC_END(): tools/perf/arch/arm64/tests/regs_load.S tools/perf/arch/arm/tests/regs_load.S tools/perf/arch/powerpc/tests/regs_load.S tools/perf/arch/x86/tests/regs_load.S We also need to switch SYM_FUNC_START_LOCAL() to SYM_FUNC_START() for the functions used directly by 'perf bench', and update tools/perf/check_headers.sh to ignore those changes when checking if the kernel original files drifted from the copies we carry. This is to get the changes from: 6dcc5627f6ae ("x86/asm: Change all ENTRY+ENDPROC to SYM_FUNC_*") ef1e03152cb0 ("x86/asm: Make some functions local") e9b9d020c487 ("x86/asm: Annotate aliases") And address these tools/perf build warnings: Warning: Kernel ABI header at 'tools/arch/x86/lib/memcpy_64.S' differs from latest version at 'arch/x86/lib/memcpy_64.S' diff -u tools/arch/x86/lib/memcpy_64.S arch/x86/lib/memcpy_64.S Warning: Kernel ABI header at 'tools/arch/x86/lib/memset_64.S' differs from latest version at 'arch/x86/lib/memset_64.S' diff -u tools/arch/x86/lib/memset_64.S arch/x86/lib/memset_64.S Cc: Adrian Hunter Cc: Borislav Petkov Cc: Jiri Olsa Cc: Jiri Slaby Cc: Namhyung Kim Link: https://lkml.kernel.org/n/tip-tay3l8x8k11p7y3qcpqh9qh5@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/arm/tests/regs_load.S | 4 +- tools/perf/arch/arm64/tests/regs_load.S | 4 +- tools/perf/arch/x86/tests/regs_load.S | 8 +-- tools/perf/check-headers.sh | 4 +- tools/perf/util/include/linux/linkage.h | 89 +++++++++++++++++++++++++++++++-- 5 files changed, 96 insertions(+), 13 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/arch/arm/tests/regs_load.S b/tools/perf/arch/arm/tests/regs_load.S index 6e2495cc4517..4284307d7822 100644 --- a/tools/perf/arch/arm/tests/regs_load.S +++ b/tools/perf/arch/arm/tests/regs_load.S @@ -37,7 +37,7 @@ .text .type perf_regs_load,%function -ENTRY(perf_regs_load) +SYM_FUNC_START(perf_regs_load) str r0, [r0, #R0] str r1, [r0, #R1] str r2, [r0, #R2] @@ -56,4 +56,4 @@ ENTRY(perf_regs_load) str lr, [r0, #PC] // store pc as lr in order to skip the call // to this function mov pc, lr -ENDPROC(perf_regs_load) +SYM_FUNC_END(perf_regs_load) diff --git a/tools/perf/arch/arm64/tests/regs_load.S b/tools/perf/arch/arm64/tests/regs_load.S index 07042511dca9..d49de40b6818 100644 --- a/tools/perf/arch/arm64/tests/regs_load.S +++ b/tools/perf/arch/arm64/tests/regs_load.S @@ -7,7 +7,7 @@ #define LDR_REG(r) ldr x##r, [x0, 8 * r] #define SP (8 * 
31) #define PC (8 * 32) -ENTRY(perf_regs_load) +SYM_FUNC_START(perf_regs_load) STR_REG(0) STR_REG(1) STR_REG(2) @@ -44,4 +44,4 @@ ENTRY(perf_regs_load) str x30, [x0, #PC] LDR_REG(1) ret -ENDPROC(perf_regs_load) +SYM_FUNC_END(perf_regs_load) diff --git a/tools/perf/arch/x86/tests/regs_load.S b/tools/perf/arch/x86/tests/regs_load.S index bbe5a0d16e51..80f14f52e3f6 100644 --- a/tools/perf/arch/x86/tests/regs_load.S +++ b/tools/perf/arch/x86/tests/regs_load.S @@ -28,7 +28,7 @@ .text #ifdef HAVE_ARCH_X86_64_SUPPORT -ENTRY(perf_regs_load) +SYM_FUNC_START(perf_regs_load) movq %rax, AX(%rdi) movq %rbx, BX(%rdi) movq %rcx, CX(%rdi) @@ -60,9 +60,9 @@ ENTRY(perf_regs_load) movq %r14, R14(%rdi) movq %r15, R15(%rdi) ret -ENDPROC(perf_regs_load) +SYM_FUNC_END(perf_regs_load) #else -ENTRY(perf_regs_load) +SYM_FUNC_START(perf_regs_load) push %edi movl 8(%esp), %edi movl %eax, AX(%edi) @@ -88,7 +88,7 @@ ENTRY(perf_regs_load) movl $0, FS(%edi) movl $0, GS(%edi) ret -ENDPROC(perf_regs_load) +SYM_FUNC_END(perf_regs_load) #endif /* diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index a1dc16724352..68039a96c1dc 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -110,8 +110,8 @@ for i in $FILES; do done # diff with extra ignore lines -check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include "' -check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include "' +check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include " -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))"' +check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include " -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"' check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"' check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"' check include/linux/ctype.h '-I "isdigit("' diff --git a/tools/perf/util/include/linux/linkage.h b/tools/perf/util/include/linux/linkage.h index f01d48a8d707..b8a5159361b4 100644 --- a/tools/perf/util/include/linux/linkage.h +++ b/tools/perf/util/include/linux/linkage.h @@ -5,10 +5,93 @@ /* linkage.h ... for including arch/x86/lib/memcpy_64.S */ -#define ENTRY(name) \ - .globl name; \ +/* Some toolchains use other characters (e.g. '`') to mark new line in macro */ +#ifndef ASM_NL +#define ASM_NL ; +#endif + +#ifndef __ALIGN +#define __ALIGN .align 4,0x90 +#define __ALIGN_STR ".align 4,0x90" +#endif + +/* SYM_T_FUNC -- type used by assembler to mark functions */ +#ifndef SYM_T_FUNC +#define SYM_T_FUNC STT_FUNC +#endif + +/* SYM_A_* -- align the symbol? */ +#define SYM_A_ALIGN ALIGN + +/* SYM_L_* -- linkage of symbols */ +#define SYM_L_GLOBAL(name) .globl name +#define SYM_L_LOCAL(name) /* nothing */ + +#define ALIGN __ALIGN + +/* === generic annotations === */ + +/* SYM_ENTRY -- use only if you have to for non-paired symbols */ +#ifndef SYM_ENTRY +#define SYM_ENTRY(name, linkage, align...) \ + linkage(name) ASM_NL \ + align ASM_NL \ name: +#endif + +/* SYM_START -- use only if you have to */ +#ifndef SYM_START +#define SYM_START(name, linkage, align...) 
\ + SYM_ENTRY(name, linkage, align) +#endif + +/* SYM_END -- use only if you have to */ +#ifndef SYM_END +#define SYM_END(name, sym_type) \ + .type name sym_type ASM_NL \ + .size name, .-name +#endif + +/* + * SYM_FUNC_START_ALIAS -- use where there are two global names for one + * function + */ +#ifndef SYM_FUNC_START_ALIAS +#define SYM_FUNC_START_ALIAS(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) +#endif + +/* SYM_FUNC_START -- use for global functions */ +#ifndef SYM_FUNC_START +/* + * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two + * later. + */ +#define SYM_FUNC_START(name) \ + SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) +#endif + +/* SYM_FUNC_START_LOCAL -- use for local functions */ +#ifndef SYM_FUNC_START_LOCAL +/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */ +#define SYM_FUNC_START_LOCAL(name) \ + SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) +#endif + +/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */ +#ifndef SYM_FUNC_END_ALIAS +#define SYM_FUNC_END_ALIAS(name) \ + SYM_END(name, SYM_T_FUNC) +#endif -#define ENDPROC(name) +/* + * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START, + * SYM_FUNC_START_WEAK, ... + */ +#ifndef SYM_FUNC_END +/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */ +#define SYM_FUNC_END(name) \ + SYM_END(name, SYM_T_FUNC) +#endif #endif /* PERF_LINUX_LINKAGE_H_ */ -- cgit v1.2.3 From f6661125ff41e27b488f36422226653baad3c382 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 2 Dec 2019 13:02:25 -0300 Subject: perf beauty: Add CLEAR_SIGHAND support for clone's flags arg Add support for the recently added CLONE_CLEAR_SIGHAND flag. This takes advantage of the copy of the uapi/linux/sched.h we have in tools/include, which allows us to build tools/perf in older systems and have the binary support printing that flag whenever that system gets its kernel updated to one where this feature is present. Cc: Adrian Hunter Cc: Adrian Reber Cc: Christian Brauner Cc: Jiri Olsa Cc: Namhyung Kim --- tools/perf/trace/beauty/clone.c | 1 + 1 file changed, 1 insertion(+) (limited to 'tools/perf') diff --git a/tools/perf/trace/beauty/clone.c b/tools/perf/trace/beauty/clone.c index 1a8d3be2030e..062ca849c8fd 100644 --- a/tools/perf/trace/beauty/clone.c +++ b/tools/perf/trace/beauty/clone.c @@ -45,6 +45,7 @@ static size_t clone__scnprintf_flags(unsigned long flags, char *bf, size_t size, P_FLAG(NEWPID); P_FLAG(NEWNET); P_FLAG(IO); + P_FLAG(CLEAR_SIGHAND); #undef P_FLAG if (flags) -- cgit v1.2.3 From 9974406884459c9301597c2c9f7def6c38099ab4 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 2 Dec 2019 15:38:59 -0300 Subject: perf kvm: Clarify the 'perf kvm' -i and -o command line options The 'perf kvm' subcommand has options that it in turn passes to other perf subcommands such as 'report' and 'record', particularly -i and -o end up setting the same variable that will then be used for 'record's -o and report '-i', which ends up being confusing, leading some to think that both -i and -o can be used with 'report'. Improve the man page to state that -i is used with the post-processing subcommands while -o is used just with 'record' and that to save the output of 'report' one should simply redirect its output to a file. Noticed while reading the https://www.linux-kvm.org/page/Perf_events page. 
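For illustration only (this example is not part of the original patch and the file names are arbitrary), the intended split between the two options looks something like:

  # perf kvm --guest -o perf.data.kvm record -a sleep 10
  # perf kvm --guest -i perf.data.kvm report > kvm-report.txt

i.e. '-o' is only meaningful for 'record', while saving the output of 'report' is done with an ordinary shell redirect.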
Cc: Adrian Hunter Cc: Jiri Olsa Cc: Namhyung Kim Cc: Steve Dickson Cc: William Cohen Link: https://lkml.kernel.org/n/tip-tclbttvmgtm525fvmh85f7d9@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-kvm.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt index 6a5bb2b17039..cf95baef7b61 100644 --- a/tools/perf/Documentation/perf-kvm.txt +++ b/tools/perf/Documentation/perf-kvm.txt @@ -68,10 +68,11 @@ OPTIONS ------- -i:: --input=:: - Input file name. + Input file name, for the 'report', 'diff' and 'buildid-list' subcommands. -o:: --output=:: - Output file name. + Output file name, for the 'record' subcommand. Doesn't work with 'report', + just redirect the output to a file when using 'report'. --host:: Collect host side performance profile. --guest:: -- cgit v1.2.3 From ae87405fb511d6220ce86b9a60807fef92e1a934 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 Nov 2019 18:52:11 +0530 Subject: perf report/top TUI: Replace pr_err() with ui__error() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit pr_err() in TUI mode does not print anyting on the screen and just quits. Replace such pr_err() with ui__error(). Before: $ perf report -s + $ After: $ perf report -s + ┌─Error:────────────────┐ │Invalid --sort key: `+'│ │ │ │Press any key... │ └───────────────────────┘ Signed-off-by: Ravi Bangoria Acked-by: Jiri Olsa Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jin Yao Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Link: http://lore.kernel.org/lkml/20191114132213.5419-2-ravi.bangoria@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sort.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 345b5ccc90f6..106d795574ba 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -2681,12 +2681,12 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str, ret = sort_dimension__add(list, tok, evlist, level); if (ret == -EINVAL) { if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok))) - pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); + ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); else - pr_err("Invalid --sort key: `%s'", tok); + ui__error("Invalid --sort key: `%s'", tok); break; } else if (ret == -ESRCH) { - pr_err("Unknown --sort key: `%s'", tok); + ui__error("Unknown --sort key: `%s'", tok); break; } } @@ -2743,7 +2743,7 @@ static int setup_sort_order(struct evlist *evlist) return 0; if (sort_order[1] == '\0') { - pr_err("Invalid --sort key: `+'"); + ui__error("Invalid --sort key: `+'"); return -EINVAL; } @@ -3034,7 +3034,7 @@ static int __setup_output_field(void) strp++; if (!strlen(strp)) { - pr_err("Invalid --fields key: `+'"); + ui__error("Invalid --fields key: `+'"); goto out; } -- cgit v1.2.3 From aa6b3c99236b49a3e842d7272efa2529f15f7d8a Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 Nov 2019 18:52:12 +0530 Subject: perf report: Make -F more strict like -s Currently -F allows branch-mode / mem-mode fields with -F even when perf report is not running in that mode. Don't allow that. 
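A sketch of the intended behaviour (hypothetical session; 'symbol_daddr' is just one representative mem-mode field, mentioned in the next patch in this series):

  $ perf report -F overhead,symbol_daddr              # expected to be rejected now as an invalid --fields key
  $ perf report --mem-mode -F overhead,symbol_daddr   # still accepted

Previously the first invocation was accepted even though the field is only meaningful in mem-mode.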
Suggested-by: Arnaldo Carvalho de Melo Signed-off-by: Ravi Bangoria Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jin Yao Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Link: http://lore.kernel.org/lkml/20191114132213.5419-3-ravi.bangoria@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/sort.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'tools/perf') diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 106d795574ba..9fcba2872130 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -2959,6 +2959,9 @@ int output_field_add(struct perf_hpp_list *list, char *tok) if (strncasecmp(tok, sd->name, strlen(tok))) continue; + if (sort__mode != SORT_MODE__MEMORY) + return -EINVAL; + return __sort_dimension__add_output(list, sd); } @@ -2968,6 +2971,9 @@ int output_field_add(struct perf_hpp_list *list, char *tok) if (strncasecmp(tok, sd->name, strlen(tok))) continue; + if (sort__mode != SORT_MODE__BRANCH) + return -EINVAL; + return __sort_dimension__add_output(list, sd); } -- cgit v1.2.3 From bb30acae4c4dacfa2622387c5ad5563246810583 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 14 Nov 2019 18:52:13 +0530 Subject: perf report: Bail out --mem-mode if mem info is not available If perf.data is recorded without -d, don't allow user to use --mem-mode with 'perf report'. symbol_daddr and phys_daddr can be recorded separately and may be present in the perf.data but at the report time they are associated with mem-mode fields and thus this restriction applies to them as well. Before: $ perf record ls $ perf report --mem-mode --stdio # Overhead Local Weight Memory access Symbol # ........ ............ ............. ....................... 55.56% 0 N/A [k] 0xffffffff81a00ae7 After: $ perf report --mem-mode --stdio Error: Selected --mem-mode but no mem data. Did you call perf record without -d? Suggested-by: Arnaldo Carvalho de Melo Signed-off-by: Ravi Bangoria Acked-by: Jiri Olsa Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jin Yao Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Link: http://lore.kernel.org/lkml/20191114132213.5419-4-ravi.bangoria@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-report.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'tools/perf') diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 830d563de889..387311c67264 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -388,6 +388,14 @@ static int report__setup_sample_type(struct report *rep) } } + if (sort__mode == SORT_MODE__MEMORY) { + if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) { + ui__error("Selected --mem-mode but no mem data. " + "Did you call perf record without -d?\n"); + return -1; + } + } + if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) { if ((sample_type & PERF_SAMPLE_REGS_USER) && (sample_type & PERF_SAMPLE_STACK_USER)) { -- cgit v1.2.3 From 29f6eeca0e14b301d9c03a3164b852c318d6348a Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 4 Dec 2019 14:08:00 +0200 Subject: perf inject: Fix processing of ID index for injected instruction tracing The ID index event is used when decoding, but can result in the following error: $ perf record --aux-sample -e '{intel_pt//,branch-misses}:u' ls $ perf inject -i perf.data -o perf.data.inj --itrace=be $ perf script -i perf.data.inj 0x1020 [0x410]: failed to process type: 69 [No such file or directory] Fix by having 'perf inject' drop the ID index event. 
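With the fix applied, the reproducer above should decode cleanly (the commands are repeated verbatim from the report, shown here only as a usage illustration):

  $ perf record --aux-sample -e '{intel_pt//,branch-misses}:u' ls
  $ perf inject -i perf.data -o perf.data.inj --itrace=be
  $ perf script -i perf.data.inj

i.e. 'perf script' no longer fails with 'failed to process type: 69'.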
Fixes: c0a6de06c446 ("perf record: Add support for AUX area sampling") Signed-off-by: Adrian Hunter Cc: Jiri Olsa Link: http://lore.kernel.org/lkml/20191204120800.8138-1-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-inject.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 9664a72a089d..7e124a7b8bfd 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -403,17 +403,6 @@ static int perf_event__repipe_tracing_data(struct perf_session *session, return err; } -static int perf_event__repipe_id_index(struct perf_session *session, - union perf_event *event) -{ - int err; - - perf_event__repipe_synth(session->tool, event); - err = perf_event__process_id_index(session, event); - - return err; -} - static int dso__read_build_id(struct dso *dso) { if (dso->has_build_id) @@ -651,7 +640,7 @@ static int __cmd_inject(struct perf_inject *inject) inject->tool.comm = perf_event__repipe_comm; inject->tool.namespaces = perf_event__repipe_namespaces; inject->tool.exit = perf_event__repipe_exit; - inject->tool.id_index = perf_event__repipe_id_index; + inject->tool.id_index = perf_event__process_id_index; inject->tool.auxtrace_info = perf_event__process_auxtrace_info; inject->tool.auxtrace = perf_event__process_auxtrace; inject->tool.aux = perf_event__drop_aux; -- cgit v1.2.3 From 05267c7eac12627fae3f25dfd203bfdb9941f9ca Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 11 Dec 2019 10:09:24 -0300 Subject: perf arch: Make the default get_cpuid() return compatible error Some of the functions calling get_cpuid() propagate back the error it returns, and all are using errno (positive) values, make the weak default get_cpuid() function return ENOSYS to be consistent and to allow checking if this is an arch not providing this function or if a provided one is having trouble getting the cpuid, to decide if the warning should be provided to the user or just a debug message should be emitted. Reviewed-by: Mark Rutland Tested-by: Mark Rutland Tested-by: John Garry # arm64 Acked-by: Jiri Olsa Cc: Adrian Hunter Cc: Namhyung Kim Cc: Will Deacon Link: https://lkml.kernel.org/n/tip-lxwjr0cd2eggzx04a780ffrv@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/header.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/perf') diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index becc2d109423..4d39a75551a0 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -850,7 +850,7 @@ int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) */ int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) { - return -1; + return ENOSYS; /* Not implemented */ } static int write_cpuid(struct feat_fd *ff, -- cgit v1.2.3 From 61208e6e1003b3fd8d2d1f2a72ec27be43955c0b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 11 Dec 2019 10:21:59 -0300 Subject: perf top: Do not bail out when perf_env__read_cpuid() returns ENOSYS 'perf top' stopped working on hw architectures that do not provide a get_cpuid() implementation and thus fallback to the weak get_cpuid() default function. 
This is done because at annotation time we may need it in the arch specific annotation init routine, but that is only being used by arches that do provide a get_cpuid() implementation: $ find tools/ -name "*.[ch]" | xargs grep 'evlist->env' tools/perf/builtin-top.c: top.evlist->env = &perf_env; tools/perf/util/evsel.c: return evsel->evlist->env; tools/perf/util/s390-cpumsf.c: sf->machine_type = s390_cpumsf_get_type(session->evlist->env->cpuid); tools/perf/util/header.c: session->evlist->env = &header->env; tools/perf/util/sample-raw.c: const char *arch_pf = perf_env__arch(evlist->env); $ $ find tools/perf/arch -name "*.[ch]" | xargs grep -w get_cpuid tools/perf/arch/x86/util/auxtrace.c: ret = get_cpuid(buffer, sizeof(buffer)); tools/perf/arch/x86/util/header.c:get_cpuid(char *buffer, size_t sz) tools/perf/arch/powerpc/util/header.c:get_cpuid(char *buffer, size_t sz) tools/perf/arch/s390/util/header.c: * Implementation of get_cpuid(). tools/perf/arch/s390/util/header.c:int get_cpuid(char *buffer, size_t sz) tools/perf/arch/s390/util/header.c: if (buf && get_cpuid(buf, 128)) $ For 'report' or 'script', i.e. tools working on perf.data files, that is set up while reading the header; it's just 'perf top' that needs to explicitly read it at tool start. Fixes: 608127f73779 ("perf top: Initialize perf_env->cpuid, needed by the per arch annotation init routine") Reported-by: John Garry Analysed-by: Jiri Olsa Reviewed-by: Mark Rutland Tested-by: Mark Rutland Tested-by: John Garry # arm64 Acked-by: Jiri Olsa Cc: Adrian Hunter Cc: Namhyung Kim Cc: Will Deacon Link: https://lkml.kernel.org/n/tip-lxwjr0cd2eggzx04a780ffrv@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-top.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index dc80044bc46f..795e353de095 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1568,9 +1568,13 @@ int cmd_top(int argc, const char **argv) */ status = perf_env__read_cpuid(&perf_env); if (status) { - pr_err("Couldn't read the cpuid for this machine: %s\n", - str_error_r(errno, errbuf, sizeof(errbuf))); - goto out_delete_evlist; + /* + * Some arches do not provide a get_cpuid(), so just use pr_debug, otherwise + * warn the user explicitely. + */ + eprintf(status == ENOSYS ? 1 : 0, verbose, + "Couldn't read the cpuid for this machine: %s\n", + str_error_r(errno, errbuf, sizeof(errbuf))); } top.evlist->env = &perf_env; -- cgit v1.2.3 From 0dd674efaf63bc6bfd89909814db618abe1e7039 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Wed, 4 Dec 2019 21:51:21 +0530 Subject: perf/x86/pmu-events: Fix Kernel_Utilization metric Kernel Utilization should divide the ref cycles spent in the kernel by the total ref cycles.
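In other words, the corrected expression (exactly what the MetricExpr changes below encode in every affected JSON file) is:

  Kernel_Utilization = CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC

i.e. kernel-mode (:k) reference cycles divided by all reference cycles, rather than the previous user-mode (:u) numerator.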
Signed-off-by: Ravi Bangoria Reviewed-by: Andi Kleen Cc: Alexander Shishkin Cc: Haiyan Song Cc: Jiri Olsa Cc: Kan Liang Cc: Namhyung Kim Link: http://lore.kernel.org/lkml/20191204162121.29998-1-ravi.bangoria@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json | 2 +- tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json | 2 +- tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json | 2 +- tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json | 2 +- tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json | 2 +- tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json | 2 +- tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json | 2 +- tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json | 2 +- tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json | 2 +- tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json | 2 +- tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json | 2 +- tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json index bc7151d639d7..45a34ce4fe89 100644 --- a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json +++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json @@ -297,7 +297,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json index 49c5f123d811..961fe4395758 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json +++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json @@ -115,7 +115,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json index 113d19e92678..746734ce09be 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json @@ -297,7 +297,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json index 2ba32af9bc36..f94653229dd4 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json @@ -315,7 +315,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff 
--git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json index c80f16fde6d0..5402cd3120f9 100644 --- a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json +++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json @@ -267,7 +267,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json index e501729c3dd1..832f3cb40b34 100644 --- a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json @@ -267,7 +267,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json index e2446966b651..d69b2a8fc0bc 100644 --- a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json +++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json @@ -285,7 +285,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json index 9294769dec64..5f465fd81315 100644 --- a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json +++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json @@ -285,7 +285,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json index 603ff9c2e9a1..3e909b306003 100644 --- a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json +++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json @@ -171,7 +171,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json index c6b485b3a2cb..50c053235752 100644 --- a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json +++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json @@ -171,7 +171,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": 
"Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json index 0ca539bb60f6..e7feb60f9fa9 100644 --- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json +++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json @@ -303,7 +303,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json index 047d7e11aa6f..21d7a0c2c2e8 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json @@ -315,7 +315,7 @@ }, { "BriefDescription": "Fraction of cycles spent in Kernel mode", - "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, -- cgit v1.2.3 From eb573e746b9d4f0921dcb2449be3df41dae3caea Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Wed, 20 Nov 2019 14:10:59 +0530 Subject: perf metricgroup: Fix printing event names of metric group with multiple events Commit f01642e4912b ("perf metricgroup: Support multiple events for metricgroup") introduced support for multiple events in a metric group. But with the current upstream, metric events names are not printed properly In power9 platform: command:# ./perf stat --metric-only -M translation -C 0 -I 1000 sleep 2 1.000208486 2.000368863 2.001400558 Similarly in skylake platform: command:./perf stat --metric-only -M Power -I 1000 1.000579994 2.002189493 With current upstream version, issue is with event name comparison logic in find_evsel_group(). Current logic is to compare events belonging to a metric group to the events in perf_evlist. Since the break statement is missing in the loop used for comparison between metric group and perf_evlist events, the loop continues to execute even after getting a pattern match, and end up in discarding the matches. Incase of single metric event belongs to metric group, its working fine, because in case of single event once it compare all events it reaches to end of perf_evlist. Example for single metric event in power9 platform: command:# ./perf stat --metric-only -M branches_per_inst -I 1000 sleep 1 1.000094653 0.2 1.001337059 0.0 This patch fixes the issue by making sure once we found all events belongs to that metric event matched in find_evsel_group(), we successfully break from that loop by adding corresponding condition. 
With this patch: In power9 platform: command:# ./perf stat --metric-only -M translation -C 0 -I 1000 sleep 2 result:# time derat_4k_miss_rate_percent derat_4k_miss_ratio derat_miss_ratio derat_64k_miss_rate_percent derat_64k_miss_ratio dslb_miss_rate_percent islb_miss_rate_percent 1.000135672 0.0 0.3 1.0 0.0 0.2 0.0 0.0 2.000380617 0.0 0.0 0.0 0.0 0.0 0.0 0.0 command:# ./perf stat --metric-only -M Power -I 1000 Similarly in skylake platform: result:# time Turbo_Utilization C3_Core_Residency C6_Core_Residency C7_Core_Residency C2_Pkg_Residency C3_Pkg_Residency C6_Pkg_Residency C7_Pkg_Residency 1.000563580 0.3 0.0 2.6 44.2 21.9 0.0 0.0 0.0 2.002235027 0.4 0.0 2.7 43.0 20.7 0.0 0.0 0.0 Committer testing: Before: [root@seventh ~]# perf stat --metric-only -M Power -I 1000 # time 1.000383223 2.001168182 3.001968545 4.002741200 5.003442022 ^C 5.777687244 [root@seventh ~]# After the patch: [root@seventh ~]# perf stat --metric-only -M Power -I 1000 # time Turbo_Utilization C3_Core_Residency C6_Core_Residency C7_Core_Residency C2_Pkg_Residency C3_Pkg_Residency C6_Pkg_Residency C7_Pkg_Residency 1.000406577 0.4 0.1 1.4 97.0 0.0 0.0 0.0 0.0 2.001481572 0.3 0.0 0.6 97.9 0.0 0.0 0.0 0.0 3.002332585 0.2 0.0 1.0 97.5 0.0 0.0 0.0 0.0 4.003196624 0.2 0.0 0.3 98.6 0.0 0.0 0.0 0.0 5.004063851 0.3 0.0 0.7 97.7 0.0 0.0 0.0 0.0 ^C 5.471260276 0.2 0.0 0.5 49.3 0.0 0.0 0.0 0.0 [root@seventh ~]# [root@seventh ~]# dmesg | grep -i skylake [ 0.187807] Performance Events: PEBS fmt3+, Skylake events, 32-deep LBR, full-width counters, Intel PMU driver. [root@seventh ~]# Fixes: f01642e4912b ("perf metricgroup: Support multiple events for metricgroup") Signed-off-by: Kajol Jain Reviewed-by: Ravi Bangoria Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: Anju T Sudhakar Cc: Jin Yao Cc: Jiri Olsa Cc: Kan Liang Cc: Madhavan Srinivasan Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/20191120084059.24458-1-kjain@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/metricgroup.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index 6a4d350d5cdb..02aee946b6c1 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -103,8 +103,11 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, if (!strcmp(ev->name, ids[i])) { if (!metric_events[i]) metric_events[i] = ev; + i++; + if (i == idnum) + break; } else { - if (++i == idnum) { + if (i + 1 == idnum) { /* Discard the whole match and start again */ i = 0; memset(metric_events, 0, @@ -124,7 +127,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist, } } - if (i != idnum - 1) { + if (i != idnum) { /* Not whole match */ return NULL; } -- cgit v1.2.3 From 28707826877f84bce0977845ea529cbdd08e4e8d Mon Sep 17 00:00:00 2001 From: Michael Petlan Date: Sun, 8 Dec 2019 17:20:56 +0100 Subject: perf header: Fix false warning when there are no duplicate cache entries Before this patch, perf expected that there might be NPROC*4 unique cache entries at max, however, it also expected that some of them would be shared and/or of the same size, thus the final number of entries would be reduced to be lower than NPROC*4. In case the number of entries hadn't been reduced (was NPROC*4), the warning was printed. 
However, some systems might have unusual cache topology, such as the following two-processor KVM guest: cpu level shared_cpu_list size 0 1 0 32K 0 1 0 64K 0 2 0 512K 0 3 0 8192K 1 1 1 32K 1 1 1 64K 1 2 1 512K 1 3 1 8192K This KVM guest has 8 (NPROC*4) unique cache entries, which used to make perf printing the message, although there actually aren't "way too many cpu caches". v2: Removing unused argument. v3: Unifying the way we obtain number of cpus. v4: Removed '& UINT_MAX' construct which is redundant. Signed-off-by: Michael Petlan Acked-by: Jiri Olsa LPU-Reference: 20191208162056.20772-1-mpetlan@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/header.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 4d39a75551a0..93ad27830e2b 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1089,21 +1089,18 @@ static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c) fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map); } -static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp) +#define MAX_CACHE_LVL 4 + +static int build_caches(struct cpu_cache_level caches[], u32 *cntp) { u32 i, cnt = 0; - long ncpus; u32 nr, cpu; u16 level; - ncpus = sysconf(_SC_NPROCESSORS_CONF); - if (ncpus < 0) - return -1; - - nr = (u32)(ncpus & UINT_MAX); + nr = cpu__max_cpu(); for (cpu = 0; cpu < nr; cpu++) { - for (level = 0; level < 10; level++) { + for (level = 0; level < MAX_CACHE_LVL; level++) { struct cpu_cache_level c; int err; @@ -1123,18 +1120,12 @@ static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp) caches[cnt++] = c; else cpu_cache_level__free(&c); - - if (WARN_ONCE(cnt == size, "way too many cpu caches..")) - goto out; } } - out: *cntp = cnt; return 0; } -#define MAX_CACHE_LVL 4 - static int write_cache(struct feat_fd *ff, struct evlist *evlist __maybe_unused) { @@ -1143,7 +1134,7 @@ static int write_cache(struct feat_fd *ff, u32 cnt = 0, i, version = 1; int ret; - ret = build_caches(caches, max_caches, &cnt); + ret = build_caches(caches, &cnt); if (ret) goto out; -- cgit v1.2.3 From 28396b7df09b9565f404591c9945eac43526cb3f Mon Sep 17 00:00:00 2001 From: Ed Maste Date: Thu, 12 Dec 2019 14:34:46 +0000 Subject: perf vendor events s390: Fix counter long description for DTLB1_GPAGE_WRITES The cf_z13 counter DTLB1_GPAGE_WRITES included a prefix 'Counter:132\tName:'. This is incorrect; remove the prefix as with 7fcfa9a2d9 for cf_z14. 
Signed-off-by: Ed Maste Cc: Alexander Shishkin Cc: Greentime Hu Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Nick Hu Cc: Peter Zijlstra Cc: Thomas Richter Cc: Vincent Chen Link: http://lore.kernel.org/lkml/20191212143446.88582-1-emaste@freefall.freebsd.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/arch/s390/cf_z13/extended.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/perf') diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json index 436ce33f1182..5da8296b667e 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json @@ -32,7 +32,7 @@ "EventCode": "132", "EventName": "DTLB1_GPAGE_WRITES", "BriefDescription": "DTLB1 Two-Gigabyte Page Writes", - "PublicDescription": "Counter:132 Name:DTLB1_GPAGE_WRITES A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a two-gigabyte page." + "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a two-gigabyte page." }, { "Unit": "CPU-M-CF", -- cgit v1.2.3 From 58b3bafff8257c6946df5d6aeb215b8ac839ed2a Mon Sep 17 00:00:00 2001 From: Ed Maste Date: Thu, 12 Dec 2019 14:53:46 +0000 Subject: perf vendor events s390: Remove name from L1D_RO_EXCL_WRITES description In 7fcfa9a2d9 an unintended prefix "Counter:18 Name:" was removed from the description for L1D_RO_EXCL_WRITES, but the extra name remained in the description. Remove it too. Fixes: 7fcfa9a2d9a7 ("perf list: Fix s390 counter long description for L1D_RO_EXCL_WRITES") Signed-off-by: Ed Maste Cc: Alexander Shishkin Cc: Greentime Hu Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Nick Hu Cc: Peter Zijlstra Cc: Thomas Richter Cc: Vincent Chen Link: http://lore.kernel.org/lkml/20191212145346.5026-1-emaste@freefall.freebsd.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/arch/s390/cf_z14/extended.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/perf') diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json index 68618152ea2c..89e070727e1b 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json @@ -4,7 +4,7 @@ "EventCode": "128", "EventName": "L1D_RO_EXCL_WRITES", "BriefDescription": "L1D Read-only Exclusive Writes", - "PublicDescription": "L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" + "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" }, { "Unit": "CPU-M-CF", -- cgit v1.2.3 From 0feba17bd7ee3b7e03d141f119049dcc23efa94e Mon Sep 17 00:00:00 2001 From: Jin Yao Date: Fri, 20 Dec 2019 09:37:19 +0800 Subject: perf report: Fix incorrectly added dimensions as switch perf data file We observed an issue that was some extra columns displayed after switching perf data file in browser. The steps to reproduce: 1. perf record -a -e cycles,instructions -- sleep 3 2. perf report --group 3. In browser, we use hotkey 's' to switch to another perf.data 4. Now in browser, the extra columns 'Self' and 'Children' are displayed. 
The issue is setup_sorting() executed again after repeat path, so dimensions are added again. This patch checks the last key returned from __cmd_report(). If it's K_SWITCH_INPUT_DATA, skips the setup_sorting(). Fixes: ad0de0971b7f ("perf report: Enable the runtime switching of perf data file") Signed-off-by: Jin Yao Tested-by: Arnaldo Carvalho de Melo Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: Feng Tang Cc: Jin Yao Cc: Kan Liang Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/20191220013722.20592-1-yao.jin@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-report.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'tools/perf') diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 387311c67264..de988589d99b 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -1076,6 +1076,7 @@ int cmd_report(int argc, const char **argv) struct stat st; bool has_br_stack = false; int branch_mode = -1; + int last_key = 0; bool branch_call_mode = false; #define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent" static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n" @@ -1450,7 +1451,8 @@ repeat: sort_order = sort_tmp; } - if (setup_sorting(session->evlist) < 0) { + if ((last_key != K_SWITCH_INPUT_DATA) && + (setup_sorting(session->evlist) < 0)) { if (sort_order) parse_options_usage(report_usage, options, "s", 1); if (field_order) @@ -1530,6 +1532,7 @@ repeat: ret = __cmd_report(&report); if (ret == K_SWITCH_INPUT_DATA) { perf_session__delete(session); + last_key = K_SWITCH_INPUT_DATA; goto repeat; } else ret = 0; -- cgit v1.2.3 From a75af86b6f344f99825cc894596804a91a2ee9cf Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 18 Dec 2019 15:23:14 -0300 Subject: perf map: Set kmap->kmaps backpointer for main kernel map chunks When a map is create to represent the main kernel area (vmlinux) with map__new2() we allocate an extra area to store a pointer to the 'struct maps' for the kernel maps, so that we can access that struct when loading ELF files or kallsyms, as we will need to split it in multiple maps, one per kernel module or ELF section (such as ".init.text"). So when map->dso->kernel is non-zero, it is expected that map__kmap(map)->kmaps to be set to the tree of kernel maps (modules, chunks of the main kernel, bpf progs put in place via PERF_RECORD_KSYMBOL, the main kernel). This was not the case when we were splitting the main kernel into chunks for its ELF sections, which ended up making 'perf report --children' processing a perf.data file with callchains to trip on __map__is_kernel(), when we press ENTER to see the popup menu for main histogram entries that starts at a symbol in the ".init.text" ELF section, e.g.: - 8.83% 0.00% swapper [kernel.vmlinux].init.text [k] start_kernel start_kernel cpu_startup_entry do_idle cpuidle_enter cpuidle_enter_state intel_idle Fix it. 
Reviewed-by: Jiri Olsa Cc: Adrian Hunter Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/20191218190120.GB13282@kernel.org/ Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol-elf.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tools/perf') diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 6658fbf196e6..1965aefccb02 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -920,6 +920,9 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, if (curr_map == NULL) return -1; + if (curr_dso->kernel) + map__kmap(curr_map)->kmaps = kmaps; + if (adjust_kernel_syms) { curr_map->start = shdr->sh_addr + ref_reloc(kmap); curr_map->end = curr_map->start + shdr->sh_size; -- cgit v1.2.3 From 55347ec340af401437680fd0e88df6739a967f9f Mon Sep 17 00:00:00 2001 From: Yuya Fujita Date: Thu, 19 Dec 2019 08:08:32 +0000 Subject: perf hists: Fix variable name's inconsistency in hists__for_each() macro Variable names are inconsistent in hists__for_each macro(). Due to this inconsistency, the macro replaces its second argument with "fmt" regardless of its original name. So far it works because only "fmt" is passed to the second argument. However, this behavior is not expected and should be fixed. Fixes: f0786af536bb ("perf hists: Introduce hists__for_each_format macro") Fixes: aa6f50af822a ("perf hists: Introduce hists__for_each_sort_list macro") Signed-off-by: Yuya Fujita Acked-by: Jiri Olsa Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/OSAPR01MB1588E1C47AC22043175DE1B2E8520@OSAPR01MB1588.jpnprd01.prod.outlook.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/hist.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/perf') diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 45286900aacb..0aa63aeb58ec 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -339,10 +339,10 @@ static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format) list_for_each_entry_safe(format, tmp, &(_list)->sorts, sort_list) #define hists__for_each_format(hists, format) \ - perf_hpp_list__for_each_format((hists)->hpp_list, fmt) + perf_hpp_list__for_each_format((hists)->hpp_list, format) #define hists__for_each_sort_list(hists, format) \ - perf_hpp_list__for_each_sort_list((hists)->hpp_list, fmt) + perf_hpp_list__for_each_sort_list((hists)->hpp_list, format) extern struct perf_hpp_fmt perf_hpp__format[]; -- cgit v1.2.3