From 96a2c464de07d7c72988db851c029b204fc59108 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 1 Aug 2009 01:34:24 +0200 Subject: tracing/bkl: Add bkl ftrace events Add two events lock_kernel and unlock_kernel() to trace the bkl uses. This opens the door for userspace tools to perform statistics about the callsites that use it, dependencies with other locks (by pairing the trace with lock events), use with recursivity and so on... The {__reacquire,release}_kernel_lock() events are not traced because these are called from schedule, thus the sched events are sufficient to trace them. Example of a trace: hald-addon-stor-4152 [000] 165.875501: unlock_kernel: depth: 0, fs/block_dev.c:1358 __blkdev_put() hald-addon-stor-4152 [000] 167.832974: lock_kernel: depth: 0, fs/block_dev.c:1167 __blkdev_get() How to get the callsites that acquire it recursively: cd /debug/tracing/events/bkl echo "lock_depth > 0" > filter firefox-4951 [001] 206.276967: unlock_kernel: depth: 1, fs/reiserfs/super.c:575 reiserfs_dirty_inode() You can also filter by file and/or line. v2: Use of FILTER_PTR_STRING attribute for files and lines fields to make them traceable. Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Li Zefan --- include/linux/smp_lock.h | 19 ++++++++++++--- include/trace/events/bkl.h | 61 ++++++++++++++++++++++++++++++++++++++++++++++ lib/kernel_lock.c | 11 +++++---- 3 files changed, 82 insertions(+), 9 deletions(-) create mode 100644 include/trace/events/bkl.h diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index 813be59bf345..d48cc77ba70d 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h @@ -3,6 +3,7 @@ #ifdef CONFIG_LOCK_KERNEL #include +#include #define kernel_locked() (current->lock_depth >= 0) @@ -24,8 +25,18 @@ static inline int reacquire_kernel_lock(struct task_struct *task) return 0; } -extern void __lockfunc lock_kernel(void) __acquires(kernel_lock); -extern void __lockfunc unlock_kernel(void) __releases(kernel_lock); +extern void __lockfunc _lock_kernel(void) __acquires(kernel_lock); +extern void __lockfunc _unlock_kernel(void) __releases(kernel_lock); + +#define lock_kernel() { \ + trace_lock_kernel(__func__, __FILE__, __LINE__); \ + _lock_kernel(); \ +} + +#define unlock_kernel() { \ + trace_unlock_kernel(__func__, __FILE__, __LINE__); \ + _unlock_kernel(); \ +} /* * Various legacy drivers don't really need the BKL in a specific @@ -41,8 +52,8 @@ static inline void cycle_kernel_lock(void) #else -#define lock_kernel() do { } while(0) -#define unlock_kernel() do { } while(0) +#define lock_kernel() trace_lock_kernel(__func__, __FILE__, __LINE__); +#define unlock_kernel() trace_unlock_kernel(__func__, __FILE__, __LINE__); #define release_kernel_lock(task) do { } while(0) #define cycle_kernel_lock() do { } while(0) #define reacquire_kernel_lock(task) 0 diff --git a/include/trace/events/bkl.h b/include/trace/events/bkl.h new file mode 100644 index 000000000000..8abd620a490e --- /dev/null +++ b/include/trace/events/bkl.h @@ -0,0 +1,61 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM bkl + +#if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_BKL_H + +#include + +TRACE_EVENT(lock_kernel, + + TP_PROTO(const char *func, const char *file, int line), + + TP_ARGS(func, file, line), + + TP_STRUCT__entry( + __field( int, lock_depth ) + __field_ext( const char *, func, FILTER_PTR_STRING ) + __field_ext( const char *, file, FILTER_PTR_STRING ) + __field( int, line ) + ), + + TP_fast_assign( + /* We want to record the lock_depth 
after lock is acquired */ + __entry->lock_depth = current->lock_depth + 1; + __entry->func = func; + __entry->file = file; + __entry->line = line; + ), + + TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth, + __entry->file, __entry->line, __entry->func) +); + +TRACE_EVENT(unlock_kernel, + + TP_PROTO(const char *func, const char *file, int line), + + TP_ARGS(func, file, line), + + TP_STRUCT__entry( + __field(int, lock_depth) + __field(const char *, func) + __field(const char *, file) + __field(int, line) + ), + + TP_fast_assign( + __entry->lock_depth = current->lock_depth; + __entry->func = func; + __entry->file = file; + __entry->line = line; + ), + + TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth, + __entry->file, __entry->line, __entry->func) +); + +#endif /* _TRACE_BKL_H */ + +/* This part must be outside protection */ +#include diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index 39f1029e3525..5c10b2e1fd08 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c @@ -5,10 +5,11 @@ * relegated to obsolescence, but used by various less * important (or lazy) subsystems. */ -#include #include #include #include +#define CREATE_TRACE_POINTS +#include /* * The 'big kernel lock' @@ -113,7 +114,7 @@ static inline void __unlock_kernel(void) * This cannot happen asynchronously, so we only need to * worry about other CPU's. */ -void __lockfunc lock_kernel(void) +void __lockfunc _lock_kernel(void) { int depth = current->lock_depth+1; if (likely(!depth)) @@ -121,13 +122,13 @@ void __lockfunc lock_kernel(void) current->lock_depth = depth; } -void __lockfunc unlock_kernel(void) +void __lockfunc _unlock_kernel(void) { BUG_ON(current->lock_depth < 0); if (likely(--current->lock_depth < 0)) __unlock_kernel(); } -EXPORT_SYMBOL(lock_kernel); -EXPORT_SYMBOL(unlock_kernel); +EXPORT_SYMBOL(_lock_kernel); +EXPORT_SYMBOL(_unlock_kernel); -- cgit v1.2.3 From 737f453fd115ea0c9642ed6b30e37e296a4e3ed7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 1 Aug 2009 03:42:44 +0200 Subject: tracing/filters: Cleanup useless headers Cleanup remaining headers inclusion that were only useful when the filter framework and its tracing related filesystem user interface weren't yet separated. v2: Keep module.h, needed for EXPORT_SYMBOL_GPL Signed-off-by: Frederic Weisbecker Cc: Tom Zanussi Cc: Steven Rostedt Cc: Li Zefan --- kernel/trace/trace_events_filter.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 23245785927f..189663d82aa7 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -18,8 +18,6 @@ * Copyright (C) 2009 Tom Zanussi */ -#include -#include #include #include #include -- cgit v1.2.3 From f3f3f0092477d0165f3f1bf0fd518550b2abd097 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 24 Sep 2009 15:27:41 +0200 Subject: tracing/event: Cleanup the useless dentry variable Cleanup the useless dentry variable while creating a kernel event set of files. trace_create_file() warns if it fails to create the file anyway, and we don't store the dentry anywhere. 
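For illustration only (both forms are taken from the hunks below), each
call site simply stops capturing the unused return value:

	/* before: the dentry is stored but never used */
	entry = trace_create_file("enable", 0644, call->dir, call, enable);

	/* after: rely on trace_create_file() to warn on failure */
	trace_create_file("enable", 0644, call->dir, call, enable);
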
v2: Fix a small conflict in kernel/trace/trace_events.c Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Li Zefan --- kernel/trace/trace_events.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 56c260b83a9c..8c91b7c8f047 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -898,9 +898,9 @@ event_subsystem_dir(const char *name, struct dentry *d_events) "'%s/filter' entry\n", name); } - entry = trace_create_file("enable", 0644, system->entry, - (void *)system->name, - &ftrace_system_enable_fops); + trace_create_file("enable", 0644, system->entry, + (void *)system->name, + &ftrace_system_enable_fops); return system->entry; } @@ -912,7 +912,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, const struct file_operations *filter, const struct file_operations *format) { - struct dentry *entry; int ret; /* @@ -930,12 +929,12 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, } if (call->regfunc) - entry = trace_create_file("enable", 0644, call->dir, call, - enable); + trace_create_file("enable", 0644, call->dir, call, + enable); if (call->id && call->profile_enable) - entry = trace_create_file("id", 0444, call->dir, call, - id); + trace_create_file("id", 0444, call->dir, call, + id); if (call->define_fields) { ret = call->define_fields(call); @@ -944,16 +943,16 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, " events/%s\n", call->name); return ret; } - entry = trace_create_file("filter", 0644, call->dir, call, - filter); + trace_create_file("filter", 0644, call->dir, call, + filter); } /* A trace may not want to export its format */ if (!call->show_format) return 0; - entry = trace_create_file("format", 0444, call->dir, call, - format); + trace_create_file("format", 0444, call->dir, call, + format); return 0; } -- cgit v1.2.3 From cbfeb267cb0ff632dbc8ff02685012bee2e87434 Mon Sep 17 00:00:00 2001 From: John Kacur Date: Thu, 24 Sep 2009 18:01:51 +0200 Subject: perf annotate: Add the cmp_null function and make use of it This function exists in builtin-report.c but not in builtin-annotate.c Functions that use cmp_null are shorter and clearer. Synchronizing functions between these two files will also make it easier to potential share code in the future. 
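A minimal standalone sketch of the resulting pattern (cmp_null() is
copied from the hunk below; cmp_comm() is a hypothetical free-standing
caller that mirrors sort__comm_collapse()):

	#include <stdint.h>
	#include <string.h>

	/* Stable ordering when one or both keys are missing */
	static int64_t cmp_null(void *l, void *r)
	{
		if (!l && !r)
			return 0;
		else if (!l)
			return -1;
		else
			return 1;
	}

	/* Callers only strcmp() when both sides are non-NULL */
	static int64_t cmp_comm(char *comm_l, char *comm_r)
	{
		if (!comm_l || !comm_r)
			return cmp_null(comm_l, comm_r);

		return strcmp(comm_l, comm_r);
	}
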
Signed-off-by: John Kacur Cc: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 1ec741615814..a33087328bd4 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -82,6 +82,16 @@ struct sort_entry { size_t (*print)(FILE *fp, struct hist_entry *); }; +static int64_t cmp_null(void *l, void *r) +{ + if (!l && !r) + return 0; + else if (!l) + return -1; + else + return 1; +} + /* --sort pid */ static int64_t @@ -116,14 +126,8 @@ sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) char *comm_l = left->thread->comm; char *comm_r = right->thread->comm; - if (!comm_l || !comm_r) { - if (!comm_l && !comm_r) - return 0; - else if (!comm_l) - return -1; - else - return 1; - } + if (!comm_l || !comm_r) + return cmp_null(comm_l, comm_r); return strcmp(comm_l, comm_r); } @@ -149,14 +153,8 @@ sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) struct dso *dso_l = left->dso; struct dso *dso_r = right->dso; - if (!dso_l || !dso_r) { - if (!dso_l && !dso_r) - return 0; - else if (!dso_l) - return -1; - else - return 1; - } + if (!dso_l || !dso_r) + return cmp_null(dso_l, dso_r); return strcmp(dso_l->name, dso_r->name); } -- cgit v1.2.3 From 8b40f521cf1c9750eab0c04da9075e7484675e9c Mon Sep 17 00:00:00 2001 From: John Kacur Date: Thu, 24 Sep 2009 18:02:18 +0200 Subject: perf tools: Protect header files with a consistent style There was a colorful mix of header guards - standardize them. Signed-off-by: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/util/cache.h | 6 +++--- tools/perf/util/callchain.h | 2 +- tools/perf/util/color.h | 6 +++--- tools/perf/util/debug.h | 4 ++++ tools/perf/util/event.h | 3 ++- tools/perf/util/exec_cmd.h | 6 +++--- tools/perf/util/header.h | 6 +++--- tools/perf/util/help.h | 6 +++--- tools/perf/util/levenshtein.h | 6 +++--- tools/perf/util/module.h | 6 +++--- tools/perf/util/parse-events.h | 6 +++--- tools/perf/util/parse-options.h | 6 +++--- tools/perf/util/quote.h | 6 +++--- tools/perf/util/run-command.h | 6 +++--- tools/perf/util/sigchain.h | 6 +++--- tools/perf/util/strbuf.h | 6 +++--- tools/perf/util/string.h | 6 +++--- tools/perf/util/strlist.h | 6 +++--- tools/perf/util/svghelper.h | 6 +++--- tools/perf/util/symbol.h | 6 +++--- tools/perf/util/thread.h | 5 +++++ tools/perf/util/trace-event.h | 6 +++--- tools/perf/util/types.h | 6 +++--- tools/perf/util/values.h | 6 +++--- 24 files changed, 72 insertions(+), 62 deletions(-) diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h index 6f8ea9d210b6..f26172c0c919 100644 --- a/tools/perf/util/cache.h +++ b/tools/perf/util/cache.h @@ -1,5 +1,5 @@ -#ifndef CACHE_H -#define CACHE_H +#ifndef __PERF_CACHE_H +#define __PERF_CACHE_H #include "util.h" #include "strbuf.h" @@ -117,4 +117,4 @@ extern char *perf_pathdup(const char *fmt, ...) 
extern size_t strlcpy(char *dest, const char *src, size_t size); -#endif /* CACHE_H */ +#endif /* __PERF_CACHE_H */ diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 43cf3ea9e088..ad4626de4c2b 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -58,4 +58,4 @@ static inline u64 cumul_hits(struct callchain_node *node) int register_callchain_param(struct callchain_param *param); void append_chain(struct callchain_node *root, struct ip_callchain *chain, struct symbol **syms); -#endif +#endif /* __PERF_CALLCHAIN_H */ diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h index 58d597564b99..24e8809210bb 100644 --- a/tools/perf/util/color.h +++ b/tools/perf/util/color.h @@ -1,5 +1,5 @@ -#ifndef COLOR_H -#define COLOR_H +#ifndef __PERF_COLOR_H +#define __PERF_COLOR_H /* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */ #define COLOR_MAXLEN 24 @@ -39,4 +39,4 @@ int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *bu int percent_color_fprintf(FILE *fp, const char *fmt, double percent); const char *get_percent_color(double percent); -#endif /* COLOR_H */ +#endif /* __PERF_COLOR_H */ diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 437eea58ce40..02d1fa1c2465 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -1,4 +1,6 @@ /* For debugging general purposes */ +#ifndef __PERF_DEBUG_H +#define __PERF_DEBUG_H extern int verbose; extern int dump_trace; @@ -6,3 +8,5 @@ extern int dump_trace; int eprintf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); void trace_event(event_t *event); + +#endif /* __PERF_DEBUG_H */ diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 2c9c26d6ded0..c31a5da6458b 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -1,5 +1,6 @@ #ifndef __PERF_RECORD_H #define __PERF_RECORD_H + #include "../perf.h" #include "util.h" #include @@ -101,4 +102,4 @@ struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); -#endif +#endif /* __PERF_RECORD_H */ diff --git a/tools/perf/util/exec_cmd.h b/tools/perf/util/exec_cmd.h index effe25eb1545..31647ac92ed1 100644 --- a/tools/perf/util/exec_cmd.h +++ b/tools/perf/util/exec_cmd.h @@ -1,5 +1,5 @@ -#ifndef PERF_EXEC_CMD_H -#define PERF_EXEC_CMD_H +#ifndef __PERF_EXEC_CMD_H +#define __PERF_EXEC_CMD_H extern void perf_set_argv_exec_path(const char *exec_path); extern const char *perf_extract_argv0_path(const char *path); @@ -10,4 +10,4 @@ extern int execv_perf_cmd(const char **argv); /* NULL terminated */ extern int execl_perf_cmd(const char *cmd, ...); extern const char *system_path(const char *path); -#endif /* PERF_EXEC_CMD_H */ +#endif /* __PERF_EXEC_CMD_H */ diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index a0761bc7863c..a2916b652a1b 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -1,5 +1,5 @@ -#ifndef _PERF_HEADER_H -#define _PERF_HEADER_H +#ifndef __PERF_HEADER_H +#define __PERF_HEADER_H #include "../../../include/linux/perf_event.h" #include @@ -44,4 +44,4 @@ perf_header__find_attr(u64 id, struct perf_header *header); struct perf_header *perf_header__new(void); -#endif /* _PERF_HEADER_H */ +#endif /* __PERF_HEADER_H */ diff --git a/tools/perf/util/help.h b/tools/perf/util/help.h index 7128783637b4..7f5c6dedd714 100644 --- a/tools/perf/util/help.h +++ b/tools/perf/util/help.h @@ -1,5 
+1,5 @@ -#ifndef HELP_H -#define HELP_H +#ifndef __PERF_HELP_H +#define __PERF_HELP_H struct cmdnames { size_t alloc; @@ -26,4 +26,4 @@ int is_in_cmdlist(struct cmdnames *c, const char *s); void list_commands(const char *title, struct cmdnames *main_cmds, struct cmdnames *other_cmds); -#endif /* HELP_H */ +#endif /* __PERF_HELP_H */ diff --git a/tools/perf/util/levenshtein.h b/tools/perf/util/levenshtein.h index 0173abeef52c..b0fcb6d8a881 100644 --- a/tools/perf/util/levenshtein.h +++ b/tools/perf/util/levenshtein.h @@ -1,8 +1,8 @@ -#ifndef LEVENSHTEIN_H -#define LEVENSHTEIN_H +#ifndef __PERF_LEVENSHTEIN_H +#define __PERF_LEVENSHTEIN_H int levenshtein(const char *string1, const char *string2, int swap_penalty, int substition_penalty, int insertion_penalty, int deletion_penalty); -#endif +#endif /* __PERF_LEVENSHTEIN_H */ diff --git a/tools/perf/util/module.h b/tools/perf/util/module.h index 8a592ef641ca..098e0412bc22 100644 --- a/tools/perf/util/module.h +++ b/tools/perf/util/module.h @@ -1,5 +1,5 @@ -#ifndef _PERF_MODULE_ -#define _PERF_MODULE_ 1 +#ifndef __PERF_MODULE_ +#define __PERF_MODULE_ 1 #include #include "../types.h" @@ -50,4 +50,4 @@ size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp); struct module *mod_dso__find_module(struct mod_dso *self, const char *name); int mod_dso__load_modules(struct mod_dso *dso); -#endif /* _PERF_MODULE_ */ +#endif /* __PERF_MODULE_ */ diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 30c608112845..8626a439033d 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -1,5 +1,5 @@ -#ifndef _PARSE_EVENTS_H -#define _PARSE_EVENTS_H +#ifndef __PERF_PARSE_EVENTS_H +#define __PERF_PARSE_EVENTS_H /* * Parse symbolic events/counts passed in as options: */ @@ -31,4 +31,4 @@ extern char debugfs_path[]; extern int valid_debugfs_mount(const char *debugfs); -#endif /* _PARSE_EVENTS_H */ +#endif /* __PERF_PARSE_EVENTS_H */ diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h index 2ee248ff27e5..948805af43c2 100644 --- a/tools/perf/util/parse-options.h +++ b/tools/perf/util/parse-options.h @@ -1,5 +1,5 @@ -#ifndef PARSE_OPTIONS_H -#define PARSE_OPTIONS_H +#ifndef __PERF_PARSE_OPTIONS_H +#define __PERF_PARSE_OPTIONS_H enum parse_opt_type { /* special types */ @@ -174,4 +174,4 @@ extern int parse_opt_verbosity_cb(const struct option *, const char *, int); extern const char *parse_options_fix_filename(const char *prefix, const char *file); -#endif +#endif /* __PERF_PARSE_OPTIONS_H */ diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h index a5454a1d1c13..b6a019733919 100644 --- a/tools/perf/util/quote.h +++ b/tools/perf/util/quote.h @@ -1,5 +1,5 @@ -#ifndef QUOTE_H -#define QUOTE_H +#ifndef __PERF_QUOTE_H +#define __PERF_QUOTE_H #include #include @@ -65,4 +65,4 @@ extern void perl_quote_print(FILE *stream, const char *src); extern void python_quote_print(FILE *stream, const char *src); extern void tcl_quote_print(FILE *stream, const char *src); -#endif +#endif /* __PERF_QUOTE_H */ diff --git a/tools/perf/util/run-command.h b/tools/perf/util/run-command.h index cc1837deba88..d79028727ce2 100644 --- a/tools/perf/util/run-command.h +++ b/tools/perf/util/run-command.h @@ -1,5 +1,5 @@ -#ifndef RUN_COMMAND_H -#define RUN_COMMAND_H +#ifndef __PERF_RUN_COMMAND_H +#define __PERF_RUN_COMMAND_H enum { ERR_RUN_COMMAND_FORK = 10000, @@ -85,4 +85,4 @@ struct async { int start_async(struct async *async); int finish_async(struct async *async); -#endif +#endif /* __PERF_RUN_COMMAND_H 
*/ diff --git a/tools/perf/util/sigchain.h b/tools/perf/util/sigchain.h index 618083bce0c6..1a53c11265fd 100644 --- a/tools/perf/util/sigchain.h +++ b/tools/perf/util/sigchain.h @@ -1,5 +1,5 @@ -#ifndef SIGCHAIN_H -#define SIGCHAIN_H +#ifndef __PERF_SIGCHAIN_H +#define __PERF_SIGCHAIN_H typedef void (*sigchain_fun)(int); @@ -8,4 +8,4 @@ int sigchain_pop(int sig); void sigchain_push_common(sigchain_fun f); -#endif /* SIGCHAIN_H */ +#endif /* __PERF_SIGCHAIN_H */ diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h index d2aa86c014c1..a3d121d6c83e 100644 --- a/tools/perf/util/strbuf.h +++ b/tools/perf/util/strbuf.h @@ -1,5 +1,5 @@ -#ifndef STRBUF_H -#define STRBUF_H +#ifndef __PERF_STRBUF_H +#define __PERF_STRBUF_H /* * Strbuf's can be use in many ways: as a byte array, or to store arbitrary @@ -134,4 +134,4 @@ extern int launch_editor(const char *path, struct strbuf *buffer, const char *co extern int strbuf_branchname(struct strbuf *sb, const char *name); extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name); -#endif /* STRBUF_H */ +#endif /* __PERF_STRBUF_H */ diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h index bf39dfadfd24..15c827475e7d 100644 --- a/tools/perf/util/string.h +++ b/tools/perf/util/string.h @@ -1,5 +1,5 @@ -#ifndef _PERF_STRING_H_ -#define _PERF_STRING_H_ +#ifndef __PERF_STRING_H_ +#define __PERF_STRING_H_ #include "types.h" @@ -8,4 +8,4 @@ int hex2u64(const char *ptr, u64 *val); #define _STR(x) #x #define STR(x) _STR(x) -#endif +#endif /* __PERF_STRING_H */ diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h index 921818e44a54..cb4659306d7b 100644 --- a/tools/perf/util/strlist.h +++ b/tools/perf/util/strlist.h @@ -1,5 +1,5 @@ -#ifndef STRLIST_H_ -#define STRLIST_H_ +#ifndef __PERF_STRLIST_H +#define __PERF_STRLIST_H #include #include @@ -36,4 +36,4 @@ static inline unsigned int strlist__nr_entries(const struct strlist *self) } int strlist__parse_list(struct strlist *self, const char *s); -#endif /* STRLIST_H_ */ +#endif /* __PERF_STRLIST_H */ diff --git a/tools/perf/util/svghelper.h b/tools/perf/util/svghelper.h index cd93195aedb3..e0781989cc31 100644 --- a/tools/perf/util/svghelper.h +++ b/tools/perf/util/svghelper.h @@ -1,5 +1,5 @@ -#ifndef _INCLUDE_GUARD_SVG_HELPER_ -#define _INCLUDE_GUARD_SVG_HELPER_ +#ifndef __PERF_SVGHELPER_H +#define __PERF_SVGHELPER_H #include "types.h" @@ -25,4 +25,4 @@ extern void svg_close(void); extern int svg_page_width; -#endif +#endif /* __PERF_SVGHELPER_H */ diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 6e8490716408..ee164f659ed3 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -1,5 +1,5 @@ -#ifndef _PERF_SYMBOL_ -#define _PERF_SYMBOL_ 1 +#ifndef __PERF_SYMBOL +#define __PERF_SYMBOL 1 #include #include "types.h" @@ -89,4 +89,4 @@ extern struct dso *vdso; extern struct dso *hypervisor_dso; extern const char *vmlinux_name; extern int modules; -#endif /* _PERF_SYMBOL_ */ +#endif /* __PERF_SYMBOL */ diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 32aea3c1c2ad..693ed1ea10b4 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -1,3 +1,6 @@ +#ifndef __PERF_THREAD_H +#define __PERF_THREAD_H + #include #include #include @@ -20,3 +23,5 @@ void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); struct map *thread__find_map(struct thread *self, u64 ip); size_t threads__fprintf(FILE *fp, struct rb_root *threads); + +#endif /* 
__PERF_THREAD_H */ diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 693f815c9429..162c3e6deb93 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -1,5 +1,5 @@ -#ifndef _TRACE_EVENTS_H -#define _TRACE_EVENTS_H +#ifndef __PERF_TRACE_EVENTS_H +#define __PERF_TRACE_EVENTS_H #include "parse-events.h" @@ -242,4 +242,4 @@ void *raw_field_ptr(struct event *event, const char *name, void *data); void read_tracing_data(struct perf_event_attr *pattrs, int nb_events); -#endif /* _TRACE_EVENTS_H */ +#endif /* __PERF_TRACE_EVENTS_H */ diff --git a/tools/perf/util/types.h b/tools/perf/util/types.h index 5e75f9005940..7d6b8331f898 100644 --- a/tools/perf/util/types.h +++ b/tools/perf/util/types.h @@ -1,5 +1,5 @@ -#ifndef _PERF_TYPES_H -#define _PERF_TYPES_H +#ifndef __PERF_TYPES_H +#define __PERF_TYPES_H /* * We define u64 as unsigned long long for every architecture @@ -14,4 +14,4 @@ typedef signed short s16; typedef unsigned char u8; typedef signed char s8; -#endif /* _PERF_TYPES_H */ +#endif /* __PERF_TYPES_H */ diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h index cadf8cf2a590..2fa967e1a88a 100644 --- a/tools/perf/util/values.h +++ b/tools/perf/util/values.h @@ -1,5 +1,5 @@ -#ifndef _PERF_VALUES_H -#define _PERF_VALUES_H +#ifndef __PERF_VALUES_H +#define __PERF_VALUES_H #include "types.h" @@ -24,4 +24,4 @@ void perf_read_values_add_value(struct perf_read_values *values, void perf_read_values_display(FILE *fp, struct perf_read_values *values, int raw); -#endif /* _PERF_VALUES_H */ +#endif /* __PERF_VALUES_H */ -- cgit v1.2.3 From dd68ada2d417e57b848822a1407b5317a54136c5 Mon Sep 17 00:00:00 2001 From: John Kacur Date: Thu, 24 Sep 2009 18:02:49 +0200 Subject: perf tools: Create util/sort.and use it Create util/sort.[ch] and move common functionality for builtin-report.c and builtin-annotate.c there, and make use of it. 
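After the move, both builtins register their sort keys through the
shared helper. Roughly (a sketch of the call pattern only, not one of
the hunks below; the real call sites first split the --sort option
string into comma-separated tokens):

	const char *tok = "comm";	/* one token from --sort */

	if (sort_dimension__add(tok) < 0)
		fprintf(stderr, "Unknown --sort key: '%s'\n", tok);
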
Signed-off-by: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 + tools/perf/builtin-annotate.c | 211 +---------------------------- tools/perf/builtin-report.c | 307 +----------------------------------------- tools/perf/util/sort.c | 268 ++++++++++++++++++++++++++++++++++++ tools/perf/util/sort.h | 93 +++++++++++++ 5 files changed, 373 insertions(+), 508 deletions(-) create mode 100644 tools/perf/util/sort.c create mode 100644 tools/perf/util/sort.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index b5f1953b6144..0a9e5aede318 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -339,6 +339,7 @@ LIB_H += util/symbol.h LIB_H += util/module.h LIB_H += util/color.h LIB_H += util/values.h +LIB_H += util/sort.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -374,6 +375,7 @@ LIB_OBJS += util/trace-event-parse.o LIB_OBJS += util/trace-event-read.o LIB_OBJS += util/trace-event-info.o LIB_OBJS += util/svghelper.o +LIB_OBJS += util/sort.o BUILTIN_OBJS += builtin-annotate.o BUILTIN_OBJS += builtin-help.o diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index a33087328bd4..059c565b31ea 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -22,12 +22,10 @@ #include "util/parse-options.h" #include "util/parse-events.h" #include "util/thread.h" +#include "util/sort.h" static char const *input_name = "perf.data"; -static char default_sort_order[] = "comm,symbol"; -static char *sort_order = default_sort_order; - static int force; static int input; static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; @@ -55,207 +53,6 @@ struct sym_ext { static struct rb_root hist; -struct hist_entry { - struct rb_node rb_node; - - struct thread *thread; - struct map *map; - struct dso *dso; - struct symbol *sym; - u64 ip; - char level; - - uint32_t count; -}; - -/* - * configurable sorting bits - */ - -struct sort_entry { - struct list_head list; - - const char *header; - - int64_t (*cmp)(struct hist_entry *, struct hist_entry *); - int64_t (*collapse)(struct hist_entry *, struct hist_entry *); - size_t (*print)(FILE *fp, struct hist_entry *); -}; - -static int64_t cmp_null(void *l, void *r) -{ - if (!l && !r) - return 0; - else if (!l) - return -1; - else - return 1; -} - -/* --sort pid */ - -static int64_t -sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static size_t -sort__thread_print(FILE *fp, struct hist_entry *self) -{ - return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid); -} - -static struct sort_entry sort_thread = { - .header = " Command: Pid", - .cmp = sort__thread_cmp, - .print = sort__thread_print, -}; - -/* --sort comm */ - -static int64_t -sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static int64_t -sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) -{ - char *comm_l = left->thread->comm; - char *comm_r = right->thread->comm; - - if (!comm_l || !comm_r) - return cmp_null(comm_l, comm_r); - - return strcmp(comm_l, comm_r); -} - -static size_t -sort__comm_print(FILE *fp, struct hist_entry *self) -{ - return fprintf(fp, "%16s", self->thread->comm); -} - -static struct sort_entry sort_comm = { - .header = " Command", - .cmp = sort__comm_cmp, - .collapse = sort__comm_collapse, - .print = sort__comm_print, -}; - -/* --sort dso */ - -static int64_t -sort__dso_cmp(struct hist_entry *left, struct 
hist_entry *right) -{ - struct dso *dso_l = left->dso; - struct dso *dso_r = right->dso; - - if (!dso_l || !dso_r) - return cmp_null(dso_l, dso_r); - - return strcmp(dso_l->name, dso_r->name); -} - -static size_t -sort__dso_print(FILE *fp, struct hist_entry *self) -{ - if (self->dso) - return fprintf(fp, "%-25s", self->dso->name); - - return fprintf(fp, "%016llx ", (u64)self->ip); -} - -static struct sort_entry sort_dso = { - .header = "Shared Object ", - .cmp = sort__dso_cmp, - .print = sort__dso_print, -}; - -/* --sort symbol */ - -static int64_t -sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) -{ - u64 ip_l, ip_r; - - if (left->sym == right->sym) - return 0; - - ip_l = left->sym ? left->sym->start : left->ip; - ip_r = right->sym ? right->sym->start : right->ip; - - return (int64_t)(ip_r - ip_l); -} - -static size_t -sort__sym_print(FILE *fp, struct hist_entry *self) -{ - size_t ret = 0; - - if (verbose) - ret += fprintf(fp, "%#018llx ", (u64)self->ip); - - if (self->sym) { - ret += fprintf(fp, "[%c] %s", - self->dso == kernel_dso ? 'k' : '.', self->sym->name); - } else { - ret += fprintf(fp, "%#016llx", (u64)self->ip); - } - - return ret; -} - -static struct sort_entry sort_sym = { - .header = "Symbol", - .cmp = sort__sym_cmp, - .print = sort__sym_print, -}; - -static int sort__need_collapse = 0; - -struct sort_dimension { - const char *name; - struct sort_entry *entry; - int taken; -}; - -static struct sort_dimension sort_dimensions[] = { - { .name = "pid", .entry = &sort_thread, }, - { .name = "comm", .entry = &sort_comm, }, - { .name = "dso", .entry = &sort_dso, }, - { .name = "symbol", .entry = &sort_sym, }, -}; - -static LIST_HEAD(hist_entry__sort_list); - -static int sort_dimension__add(char *tok) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { - struct sort_dimension *sd = &sort_dimensions[i]; - - if (sd->taken) - continue; - - if (strncasecmp(tok, sd->name, strlen(tok))) - continue; - - if (sd->entry->collapse) - sort__need_collapse = 1; - - list_add_tail(&sd->entry->list, &hist_entry__sort_list); - sd->taken = 1; - - return 0; - } - - return -ESRCH; -} - static int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) { @@ -1137,5 +934,11 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) setup_pager(); + if (field_sep && *field_sep == '.') { + fputs("'.' 
is the only non valid --field-separator argument\n", + stderr); + exit(129); + } + return __cmd_annotate(); } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 19669c20088e..7b43504900ff 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -27,15 +27,13 @@ #include "util/parse-events.h" #include "util/thread.h" +#include "util/sort.h" static char const *input_name = "perf.data"; -static char default_sort_order[] = "comm,dso,symbol"; -static char *sort_order = default_sort_order; static char *dso_list_str, *comm_list_str, *sym_list_str, *col_width_list_str; static struct strlist *dso_list, *comm_list, *sym_list; -static char *field_sep; static int force; static int input; @@ -53,10 +51,6 @@ static char *pretty_printing_style = default_pretty_printing_style; static unsigned long page_size; static unsigned long mmap_window = 32; -static char default_parent_pattern[] = "^sys_|^do_page_fault"; -static char *parent_pattern = default_parent_pattern; -static regex_t parent_regex; - static int exclude_other = 1; static char callchain_default_opt[] = "fractal,0.5"; @@ -80,304 +74,8 @@ struct callchain_param callchain_param = { static u64 sample_type; -static int repsep_fprintf(FILE *fp, const char *fmt, ...) -{ - int n; - va_list ap; - - va_start(ap, fmt); - if (!field_sep) - n = vfprintf(fp, fmt, ap); - else { - char *bf = NULL; - n = vasprintf(&bf, fmt, ap); - if (n > 0) { - char *sep = bf; - - while (1) { - sep = strchr(sep, *field_sep); - if (sep == NULL) - break; - *sep = '.'; - } - } - fputs(bf, fp); - free(bf); - } - va_end(ap); - return n; -} - -static unsigned int dsos__col_width, - comms__col_width, - threads__col_width; - -/* - * histogram, sorted on item, collects counts - */ - static struct rb_root hist; -struct hist_entry { - struct rb_node rb_node; - - struct thread *thread; - struct map *map; - struct dso *dso; - struct symbol *sym; - struct symbol *parent; - u64 ip; - char level; - struct callchain_node callchain; - struct rb_root sorted_chain; - - u64 count; -}; - -/* - * configurable sorting bits - */ - -struct sort_entry { - struct list_head list; - - const char *header; - - int64_t (*cmp)(struct hist_entry *, struct hist_entry *); - int64_t (*collapse)(struct hist_entry *, struct hist_entry *); - size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width); - unsigned int *width; - bool elide; -}; - -static int64_t cmp_null(void *l, void *r) -{ - if (!l && !r) - return 0; - else if (!l) - return -1; - else - return 1; -} - -/* --sort pid */ - -static int64_t -sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static size_t -sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width) -{ - return repsep_fprintf(fp, "%*s:%5d", width - 6, - self->thread->comm ?: "", self->thread->pid); -} - -static struct sort_entry sort_thread = { - .header = "Command: Pid", - .cmp = sort__thread_cmp, - .print = sort__thread_print, - .width = &threads__col_width, -}; - -/* --sort comm */ - -static int64_t -sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static int64_t -sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) -{ - char *comm_l = left->thread->comm; - char *comm_r = right->thread->comm; - - if (!comm_l || !comm_r) - return cmp_null(comm_l, comm_r); - - return strcmp(comm_l, comm_r); -} - -static size_t -sort__comm_print(FILE *fp, struct hist_entry 
*self, unsigned int width) -{ - return repsep_fprintf(fp, "%*s", width, self->thread->comm); -} - -static struct sort_entry sort_comm = { - .header = "Command", - .cmp = sort__comm_cmp, - .collapse = sort__comm_collapse, - .print = sort__comm_print, - .width = &comms__col_width, -}; - -/* --sort dso */ - -static int64_t -sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct dso *dso_l = left->dso; - struct dso *dso_r = right->dso; - - if (!dso_l || !dso_r) - return cmp_null(dso_l, dso_r); - - return strcmp(dso_l->name, dso_r->name); -} - -static size_t -sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width) -{ - if (self->dso) - return repsep_fprintf(fp, "%-*s", width, self->dso->name); - - return repsep_fprintf(fp, "%*llx", width, (u64)self->ip); -} - -static struct sort_entry sort_dso = { - .header = "Shared Object", - .cmp = sort__dso_cmp, - .print = sort__dso_print, - .width = &dsos__col_width, -}; - -/* --sort symbol */ - -static int64_t -sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) -{ - u64 ip_l, ip_r; - - if (left->sym == right->sym) - return 0; - - ip_l = left->sym ? left->sym->start : left->ip; - ip_r = right->sym ? right->sym->start : right->ip; - - return (int64_t)(ip_r - ip_l); -} - -static size_t -sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used) -{ - size_t ret = 0; - - if (verbose) - ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, - dso__symtab_origin(self->dso)); - - ret += repsep_fprintf(fp, "[%c] ", self->level); - if (self->sym) { - ret += repsep_fprintf(fp, "%s", self->sym->name); - - if (self->sym->module) - ret += repsep_fprintf(fp, "\t[%s]", - self->sym->module->name); - } else { - ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip); - } - - return ret; -} - -static struct sort_entry sort_sym = { - .header = "Symbol", - .cmp = sort__sym_cmp, - .print = sort__sym_print, -}; - -/* --sort parent */ - -static int64_t -sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct symbol *sym_l = left->parent; - struct symbol *sym_r = right->parent; - - if (!sym_l || !sym_r) - return cmp_null(sym_l, sym_r); - - return strcmp(sym_l->name, sym_r->name); -} - -static size_t -sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width) -{ - return repsep_fprintf(fp, "%-*s", width, - self->parent ? 
self->parent->name : "[other]"); -} - -static unsigned int parent_symbol__col_width; - -static struct sort_entry sort_parent = { - .header = "Parent symbol", - .cmp = sort__parent_cmp, - .print = sort__parent_print, - .width = &parent_symbol__col_width, -}; - -static int sort__need_collapse = 0; -static int sort__has_parent = 0; - -struct sort_dimension { - const char *name; - struct sort_entry *entry; - int taken; -}; - -static struct sort_dimension sort_dimensions[] = { - { .name = "pid", .entry = &sort_thread, }, - { .name = "comm", .entry = &sort_comm, }, - { .name = "dso", .entry = &sort_dso, }, - { .name = "symbol", .entry = &sort_sym, }, - { .name = "parent", .entry = &sort_parent, }, -}; - -static LIST_HEAD(hist_entry__sort_list); - -static int sort_dimension__add(const char *tok) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { - struct sort_dimension *sd = &sort_dimensions[i]; - - if (sd->taken) - continue; - - if (strncasecmp(tok, sd->name, strlen(tok))) - continue; - - if (sd->entry->collapse) - sort__need_collapse = 1; - - if (sd->entry == &sort_parent) { - int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); - if (ret) { - char err[BUFSIZ]; - - regerror(ret, &parent_regex, err, sizeof(err)); - fprintf(stderr, "Invalid regex: %s\n%s", - parent_pattern, err); - exit(-1); - } - sort__has_parent = 1; - } - - list_add_tail(&sd->entry->list, &hist_entry__sort_list); - sd->taken = 1; - - return 0; - } - - return -ESRCH; -} - static int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) { @@ -1606,7 +1304,8 @@ setup: return 0; } -static const char * const report_usage[] = { +//static const char * const report_usage[] = { +const char * const report_usage[] = { "perf report [] ", NULL }; diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c new file mode 100644 index 000000000000..50e75abb1fdd --- /dev/null +++ b/tools/perf/util/sort.c @@ -0,0 +1,268 @@ +#include "sort.h" + +regex_t parent_regex; +char default_parent_pattern[] = "^sys_|^do_page_fault"; +char *parent_pattern = default_parent_pattern; +char default_sort_order[] = "comm,dso,symbol"; +char *sort_order = default_sort_order; +int sort__need_collapse = 0; +int sort__has_parent = 0; + +unsigned int dsos__col_width; +unsigned int comms__col_width; +unsigned int threads__col_width; +static unsigned int parent_symbol__col_width; +char * field_sep; + +LIST_HEAD(hist_entry__sort_list); + +struct sort_entry sort_thread = { + .header = "Command: Pid", + .cmp = sort__thread_cmp, + .print = sort__thread_print, + .width = &threads__col_width, +}; + +struct sort_entry sort_comm = { + .header = "Command", + .cmp = sort__comm_cmp, + .collapse = sort__comm_collapse, + .print = sort__comm_print, + .width = &comms__col_width, +}; + +struct sort_entry sort_dso = { + .header = "Shared Object", + .cmp = sort__dso_cmp, + .print = sort__dso_print, + .width = &dsos__col_width, +}; + +struct sort_entry sort_sym = { + .header = "Symbol", + .cmp = sort__sym_cmp, + .print = sort__sym_print, +}; + +struct sort_entry sort_parent = { + .header = "Parent symbol", + .cmp = sort__parent_cmp, + .print = sort__parent_print, + .width = &parent_symbol__col_width, +}; + +struct sort_dimension { + const char *name; + struct sort_entry *entry; + int taken; +}; + +static struct sort_dimension sort_dimensions[] = { + { .name = "pid", .entry = &sort_thread, }, + { .name = "comm", .entry = &sort_comm, }, + { .name = "dso", .entry = &sort_dso, }, + { .name = "symbol", .entry = &sort_sym, }, + { .name = 
"parent", .entry = &sort_parent, }, +}; + +int64_t cmp_null(void *l, void *r) +{ + if (!l && !r) + return 0; + else if (!l) + return -1; + else + return 1; +} + +/* --sort pid */ + +int64_t +sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->thread->pid - left->thread->pid; +} + +int repsep_fprintf(FILE *fp, const char *fmt, ...) +{ + int n; + va_list ap; + + va_start(ap, fmt); + if (!field_sep) + n = vfprintf(fp, fmt, ap); + else { + char *bf = NULL; + n = vasprintf(&bf, fmt, ap); + if (n > 0) { + char *sep = bf; + + while (1) { + sep = strchr(sep, *field_sep); + if (sep == NULL) + break; + *sep = '.'; + } + } + fputs(bf, fp); + free(bf); + } + va_end(ap); + return n; +} + +size_t +sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width) +{ + return repsep_fprintf(fp, "%*s:%5d", width - 6, + self->thread->comm ?: "", self->thread->pid); +} + +size_t +sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width) +{ + return repsep_fprintf(fp, "%*s", width, self->thread->comm); +} + +/* --sort dso */ + +int64_t +sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct dso *dso_l = left->dso; + struct dso *dso_r = right->dso; + + if (!dso_l || !dso_r) + return cmp_null(dso_l, dso_r); + + return strcmp(dso_l->name, dso_r->name); +} + +size_t +sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width) +{ + if (self->dso) + return repsep_fprintf(fp, "%-*s", width, self->dso->name); + + return repsep_fprintf(fp, "%*llx", width, (u64)self->ip); +} + +/* --sort symbol */ + +int64_t +sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) +{ + u64 ip_l, ip_r; + + if (left->sym == right->sym) + return 0; + + ip_l = left->sym ? left->sym->start : left->ip; + ip_r = right->sym ? right->sym->start : right->ip; + + return (int64_t)(ip_r - ip_l); +} + + +size_t +sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used) +{ + size_t ret = 0; + + if (verbose) + ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, + dso__symtab_origin(self->dso)); + + ret += repsep_fprintf(fp, "[%c] ", self->level); + if (self->sym) { + ret += repsep_fprintf(fp, "%s", self->sym->name); + + if (self->sym->module) + ret += repsep_fprintf(fp, "\t[%s]", + self->sym->module->name); + } else { + ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip); + } + + return ret; +} + +/* --sort comm */ + +int64_t +sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->thread->pid - left->thread->pid; +} + +int64_t +sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) +{ + char *comm_l = left->thread->comm; + char *comm_r = right->thread->comm; + + if (!comm_l || !comm_r) + return cmp_null(comm_l, comm_r); + + return strcmp(comm_l, comm_r); +} + +/* --sort parent */ + +int64_t +sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct symbol *sym_l = left->parent; + struct symbol *sym_r = right->parent; + + if (!sym_l || !sym_r) + return cmp_null(sym_l, sym_r); + + return strcmp(sym_l->name, sym_r->name); +} + +size_t +sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width) +{ + return repsep_fprintf(fp, "%-*s", width, + self->parent ? 
self->parent->name : "[other]"); +} + +int sort_dimension__add(const char *tok) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { + struct sort_dimension *sd = &sort_dimensions[i]; + + if (sd->taken) + continue; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + if (sd->entry->collapse) + sort__need_collapse = 1; + + if (sd->entry == &sort_parent) { + int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); + if (ret) { + char err[BUFSIZ]; + + regerror(ret, &parent_regex, err, sizeof(err)); + fprintf(stderr, "Invalid regex: %s\n%s", + parent_pattern, err); + exit(-1); + } + sort__has_parent = 1; + } + + list_add_tail(&sd->entry->list, &hist_entry__sort_list); + sd->taken = 1; + + return 0; + } + + return -ESRCH; +} + diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h new file mode 100644 index 000000000000..4684fd6d5c4a --- /dev/null +++ b/tools/perf/util/sort.h @@ -0,0 +1,93 @@ +#ifndef __PERF_SORT_H +#define __PERF_SORT_H +#include "../builtin.h" + +#include "util.h" + +#include "color.h" +#include +#include "cache.h" +#include +#include "symbol.h" +#include "string.h" +#include "callchain.h" +#include "strlist.h" +#include "values.h" + +#include "../perf.h" +#include "debug.h" +#include "header.h" + +#include "parse-options.h" +#include "parse-events.h" + +#include "thread.h" +#include "sort.h" + +extern regex_t parent_regex; +extern char *sort_order; +extern char default_parent_pattern[]; +extern char *parent_pattern; +extern char default_sort_order[]; +extern int sort__need_collapse; +extern int sort__has_parent; +extern char *field_sep; +extern struct sort_entry sort_comm; +extern struct sort_entry sort_dso; +extern struct sort_entry sort_sym; +extern struct sort_entry sort_parent; +extern unsigned int dsos__col_width; +extern unsigned int comms__col_width; +extern unsigned int threads__col_width; + +struct hist_entry { + struct rb_node rb_node; + + struct thread *thread; + struct map *map; + struct dso *dso; + struct symbol *sym; + struct symbol *parent; + u64 ip; + char level; + struct callchain_node callchain; + struct rb_root sorted_chain; + + u64 count; +}; + +/* + * configurable sorting bits + */ + +struct sort_entry { + struct list_head list; + + const char *header; + + int64_t (*cmp)(struct hist_entry *, struct hist_entry *); + int64_t (*collapse)(struct hist_entry *, struct hist_entry *); + size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width); + unsigned int *width; + bool elide; +}; + +extern struct sort_entry sort_thread; +extern struct list_head hist_entry__sort_list; + +extern int repsep_fprintf(FILE *fp, const char *fmt, ...); +extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int); +extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int); +extern size_t sort__dso_print(FILE *, struct hist_entry *, unsigned int); +extern size_t sort__sym_print(FILE *, struct hist_entry *, unsigned int __used); +extern int64_t cmp_null(void *, void *); +extern int64_t sort__thread_cmp(struct hist_entry *, struct hist_entry *); +extern int64_t sort__comm_cmp(struct hist_entry *, struct hist_entry *); +extern int64_t sort__comm_collapse(struct hist_entry *, struct hist_entry *); +extern int64_t sort__dso_cmp(struct hist_entry *, struct hist_entry *); +extern int64_t sort__sym_cmp(struct hist_entry *, struct hist_entry *); +extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *); +extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int); +extern 
int sort_dimension__add(const char *); + +#endif /* __PERF_SORT_H */ -- cgit v1.2.3 From 1889d20922d14a97b2099fa4d47587217c0ba48b Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 24 Sep 2009 21:10:44 +0200 Subject: tracing/filters: Provide basic regex support This patch provides basic support for regular expressions in filters. It supports the following types of regexp: - *match_beginning - *match_middle* - match_end* - !don't match Example: cd /debug/tracing/events/bkl/lock_kernel echo 'file == "*reiserfs*"' > filter echo 1 > enable gedit-4941 [000] 457.735437: lock_kernel: depth: 0, fs/reiserfs/namei.c:334 reiserfs_lookup() sync_supers-227 [001] 461.379985: lock_kernel: depth: 0, fs/reiserfs/super.c:69 reiserfs_sync_fs() sync_supers-227 [000] 461.383096: lock_kernel: depth: 0, fs/reiserfs/journal.c:1069 flush_commit_list() reiserfs/1-1369 [001] 461.479885: lock_kernel: depth: 0, fs/reiserfs/journal.c:3509 flush_async_commits() Every string is now handled as a regexp in the filter framework, which helps to factorize the code for handling both simple strings and regexp comparisons. (The regexp parsing code has been wildly cherry picked from ftrace.c written by Steve.) v2: Simplify the whole and drop the filter_regex file Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi Cc: Li Zefan --- kernel/trace/trace.h | 27 ++++--- kernel/trace/trace_events_filter.c | 155 +++++++++++++++++++++++++++++++++---- 2 files changed, 157 insertions(+), 25 deletions(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 86bcff94791a..8d0db6018fe4 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -702,20 +702,29 @@ struct event_subsystem { }; struct filter_pred; +struct regex; typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, int val1, int val2); +typedef int (*regex_match_func)(char *str, struct regex *r, int len); + +struct regex { + char pattern[MAX_FILTER_STR_VAL]; + int len; + int field_len; + regex_match_func match; +}; + struct filter_pred { - filter_pred_fn_t fn; - u64 val; - char str_val[MAX_FILTER_STR_VAL]; - int str_len; - char *field_name; - int offset; - int not; - int op; - int pop_n; + filter_pred_fn_t fn; + u64 val; + struct regex regex; + char *field_name; + int offset; + int not; + int op; + int pop_n; }; extern void print_event_filter(struct ftrace_event_call *call, diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 189663d82aa7..d3c94c139567 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -195,9 +195,9 @@ static int filter_pred_string(struct filter_pred *pred, void *event, char *addr = (char *)(event + pred->offset); int cmp, match; - cmp = strncmp(addr, pred->str_val, pred->str_len); + cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len); - match = (!cmp) ^ pred->not; + match = cmp ^ pred->not; return match; } @@ -209,9 +209,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event, char **addr = (char **)(event + pred->offset); int cmp, match; - cmp = strncmp(*addr, pred->str_val, pred->str_len); + cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len); - match = (!cmp) ^ pred->not; + match = cmp ^ pred->not; return match; } @@ -235,9 +235,9 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event, char *addr = (char *)(event + str_loc); int cmp, match; - cmp = strncmp(addr, pred->str_val, str_len); + cmp = pred->regex.match(addr, &pred->regex, str_len); - match = (!cmp) ^ 
pred->not; + match = cmp ^ pred->not; return match; } @@ -248,6 +248,126 @@ static int filter_pred_none(struct filter_pred *pred, void *event, return 0; } +/* Basic regex callbacks */ +static int regex_match_full(char *str, struct regex *r, int len) +{ + if (strncmp(str, r->pattern, len) == 0) + return 1; + return 0; +} + +static int regex_match_front(char *str, struct regex *r, int len) +{ + if (strncmp(str, r->pattern, len) == 0) + return 1; + return 0; +} + +static int regex_match_middle(char *str, struct regex *r, int len) +{ + if (strstr(str, r->pattern)) + return 1; + return 0; +} + +static int regex_match_end(char *str, struct regex *r, int len) +{ + char *ptr = strstr(str, r->pattern); + + if (ptr && (ptr[r->len] == 0)) + return 1; + return 0; +} + +enum regex_type { + MATCH_FULL, + MATCH_FRONT_ONLY, + MATCH_MIDDLE_ONLY, + MATCH_END_ONLY, +}; + +/* + * Pass in a buffer containing a regex and this function will + * set search to point to the search part of the buffer and + * return the type of search it is (see enum above). + * This does modify buff. + * + * Returns enum type. + * search returns the pointer to use for comparison. + * not returns 1 if buff started with a '!' + * 0 otherwise. + */ +static enum regex_type +filter_parse_regex(char *buff, int len, char **search, int *not) +{ + int type = MATCH_FULL; + int i; + + if (buff[0] == '!') { + *not = 1; + buff++; + len--; + } else + *not = 0; + + *search = buff; + + for (i = 0; i < len; i++) { + if (buff[i] == '*') { + if (!i) { + *search = buff + 1; + type = MATCH_END_ONLY; + } else { + if (type == MATCH_END_ONLY) + type = MATCH_MIDDLE_ONLY; + else + type = MATCH_FRONT_ONLY; + buff[i] = 0; + break; + } + } + } + + return type; +} + +static int filter_build_regex(struct filter_pred *pred) +{ + struct regex *r = &pred->regex; + char *search, *dup; + enum regex_type type; + int not; + + type = filter_parse_regex(r->pattern, r->len, &search, ¬); + dup = kstrdup(search, GFP_KERNEL); + if (!dup) + return -ENOMEM; + + strcpy(r->pattern, dup); + kfree(dup); + + r->len = strlen(r->pattern); + + switch (type) { + case MATCH_FULL: + r->match = regex_match_full; + break; + case MATCH_FRONT_ONLY: + r->match = regex_match_front; + break; + case MATCH_MIDDLE_ONLY: + r->match = regex_match_middle; + break; + case MATCH_END_ONLY: + r->match = regex_match_end; + break; + } + + pred->not ^= not; + + return 0; +} + /* return 1 if event matches, 0 otherwise (discard) */ int filter_match_preds(struct ftrace_event_call *call, void *rec) { @@ -394,7 +514,7 @@ static void filter_clear_pred(struct filter_pred *pred) { kfree(pred->field_name); pred->field_name = NULL; - pred->str_len = 0; + pred->regex.len = 0; } static int filter_set_pred(struct filter_pred *dest, @@ -658,21 +778,24 @@ static int filter_add_pred(struct filter_parse_state *ps, } if (is_string_field(field)) { - pred->str_len = field->size; + ret = filter_build_regex(pred); + if (ret) + return ret; - if (field->filter_type == FILTER_STATIC_STRING) + if (field->filter_type == FILTER_STATIC_STRING) { fn = filter_pred_string; - else if (field->filter_type == FILTER_DYN_STRING) - fn = filter_pred_strloc; + pred->regex.field_len = field->size; + } else if (field->filter_type == FILTER_DYN_STRING) + fn = filter_pred_strloc; else { fn = filter_pred_pchar; - pred->str_len = strlen(pred->str_val); + pred->regex.field_len = strlen(pred->regex.pattern); } } else { if (field->is_signed) - ret = strict_strtoll(pred->str_val, 0, &val); + ret = strict_strtoll(pred->regex.pattern, 0, &val); else - ret = 
strict_strtoull(pred->str_val, 0, &val); + ret = strict_strtoull(pred->regex.pattern, 0, &val); if (ret) { parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0); return -EINVAL; @@ -1042,8 +1165,8 @@ static struct filter_pred *create_pred(int op, char *operand1, char *operand2) return NULL; } - strcpy(pred->str_val, operand2); - pred->str_len = strlen(operand2); + strcpy(pred->regex.pattern, operand2); + pred->regex.len = strlen(pred->regex.pattern); pred->op = op; -- cgit v1.2.3 From 3f6fe06dbf67b46d36fedec502300e04dffeb67a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 24 Sep 2009 21:31:51 +0200 Subject: tracing/filters: Unify the regex parsing helpers The filter code has stolen the regex parsing function from ftrace to get the regex support. We have duplicated this code, so factorize it in the filter area and make it generally available, as the filter code is the most suited to host this feature. Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi Cc: Li Zefan --- kernel/trace/ftrace.c | 64 +++----------------------------------- kernel/trace/trace.h | 9 ++++++ kernel/trace/trace_events_filter.c | 20 ++++++------ 3 files changed, 23 insertions(+), 70 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index cc615f84751b..ddf23a225b52 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1655,60 +1655,6 @@ ftrace_regex_lseek(struct file *file, loff_t offset, int origin) return ret; } -enum { - MATCH_FULL, - MATCH_FRONT_ONLY, - MATCH_MIDDLE_ONLY, - MATCH_END_ONLY, -}; - -/* - * (static function - no need for kernel doc) - * - * Pass in a buffer containing a glob and this function will - * set search to point to the search part of the buffer and - * return the type of search it is (see enum above). - * This does modify buff. - * - * Returns enum type. - * search returns the pointer to use for comparison. - * not returns 1 if buff started with a '!' - * 0 otherwise. - */ -static int -ftrace_setup_glob(char *buff, int len, char **search, int *not) -{ - int type = MATCH_FULL; - int i; - - if (buff[0] == '!') { - *not = 1; - buff++; - len--; - } else - *not = 0; - - *search = buff; - - for (i = 0; i < len; i++) { - if (buff[i] == '*') { - if (!i) { - *search = buff + 1; - type = MATCH_END_ONLY; - } else { - if (type == MATCH_END_ONLY) - type = MATCH_MIDDLE_ONLY; - else - type = MATCH_FRONT_ONLY; - buff[i] = 0; - break; - } - } - } - - return type; -} - static int ftrace_match(char *str, char *regex, int len, int type) { int matched = 0; @@ -1757,7 +1703,7 @@ static void ftrace_match_records(char *buff, int len, int enable) int not; flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; - type = ftrace_setup_glob(buff, len, &search, ¬); + type = filter_parse_regex(buff, len, &search, ¬); search_len = strlen(search); @@ -1825,7 +1771,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable) } if (strlen(buff)) { - type = ftrace_setup_glob(buff, strlen(buff), &search, ¬); + type = filter_parse_regex(buff, strlen(buff), &search, ¬); search_len = strlen(search); } @@ -1990,7 +1936,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, int count = 0; char *search; - type = ftrace_setup_glob(glob, strlen(glob), &search, ¬); + type = filter_parse_regex(glob, strlen(glob), &search, ¬); len = strlen(search); /* we do not support '!' 
for function probes */ @@ -2067,7 +2013,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, else if (glob) { int not; - type = ftrace_setup_glob(glob, strlen(glob), &search, ¬); + type = filter_parse_regex(glob, strlen(glob), &search, ¬); len = strlen(search); /* we do not support '!' for function probes */ @@ -2520,7 +2466,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) return -ENODEV; /* decode regex */ - type = ftrace_setup_glob(buffer, strlen(buffer), &search, ¬); + type = filter_parse_regex(buffer, strlen(buffer), &search, ¬); if (not) return -EINVAL; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 8d0db6018fe4..db6b83edd49b 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -709,6 +709,13 @@ typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, typedef int (*regex_match_func)(char *str, struct regex *r, int len); +enum regex_type { + MATCH_FULL, + MATCH_FRONT_ONLY, + MATCH_MIDDLE_ONLY, + MATCH_END_ONLY, +}; + struct regex { char pattern[MAX_FILTER_STR_VAL]; int len; @@ -727,6 +734,8 @@ struct filter_pred { int pop_n; }; +extern enum regex_type +filter_parse_regex(char *buff, int len, char **search, int *not); extern void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s); extern int apply_event_filter(struct ftrace_event_call *call, diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index d3c94c139567..8c194de675b0 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -279,15 +279,14 @@ static int regex_match_end(char *str, struct regex *r, int len) return 0; } -enum regex_type { - MATCH_FULL, - MATCH_FRONT_ONLY, - MATCH_MIDDLE_ONLY, - MATCH_END_ONLY, -}; - -/* - * Pass in a buffer containing a regex and this function will +/** + * filter_parse_regex - parse a basic regex + * @buff: the raw regex + * @len: length of the regex + * @search: will point to the beginning of the string to compare + * @not: tell whether the match will have to be inverted + * + * This passes in a buffer containing a regex and this function will * set search to point to the search part of the buffer and * return the type of search it is (see enum above). * This does modify buff. @@ -297,8 +296,7 @@ enum regex_type { * not returns 1 if buff started with a '!' * 0 otherwise. */ -static enum regex_type -filter_parse_regex(char *buff, int len, char **search, int *not) +enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not) { int type = MATCH_FULL; int i; -- cgit v1.2.3 From 925936ebf35a95c290e010b784c962164e6728f3 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 28 Sep 2009 17:12:49 +0200 Subject: tracing: Pushdown the bkl tracepoints calls Currently we are calling the bkl tracepoint callbacks just before the bkl lock/unlock operations, ie the tracepoint call is not inside a lock_kernel() function but inside a lock_kernel() macro. Hence the bkl trace event header must be included from smp_lock.h. This raises some nasty circular header dependencies: linux/smp_lock.h -> trace/events/bkl.h -> trace/define_trace.h -> trace/ftrace.h -> linux/ftrace_event.h -> linux/hardirq.h -> linux/smp_lock.h This results in incomplete event declarations, spurious event definitions and other kind of funny behaviours. This is hardly fixable without ugly workarounds. 
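To make the alternative concrete, here is a minimal user-space sketch of the pattern the patch below switches to: a thin macro that only captures __func__/__FILE__/__LINE__ and hands them to one out-of-line function, so that only that function's file needs the trace-event machinery. This is an illustration of the pattern only, not the kernel code:

#include <stdio.h>

/* stand-in for trace_lock_kernel(); only this file needs to know about it */
static void trace_lock(const char *func, const char *file, int line)
{
	printf("lock_kernel: %s:%d %s()\n", file, line, func);
}

static void _lock_kernel(const char *func, const char *file, int line)
{
	trace_lock(func, file, line);
	/* ... acquire the actual lock here ... */
}

/* the macro only forwards the callsite; no trace header needed by callers */
#define lock_kernel() _lock_kernel(__func__, __FILE__, __LINE__)

int main(void)
{
	lock_kernel();	/* reports this exact callsite */
	return 0;
}
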
So instead, we push the file name, line number and function name as lock_kernel() parameters, so that we only deal with the trace event header from lib/kernel_lock.c. This adds two parameters to lock_kernel() and unlock_kernel(), but it should be fine with respect to performance because this pair does not seem to be called in fast paths. Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Ingo Molnar Cc: Li Zefan --- include/linux/smp_lock.h | 28 +++++++++++++++------------- lib/kernel_lock.c | 15 +++++++++++---- 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index d48cc77ba70d..2ea1dd1ba21c 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h @@ -3,7 +3,6 @@ #ifdef CONFIG_LOCK_KERNEL #include -#include #define kernel_locked() (current->lock_depth >= 0) @@ -25,18 +24,21 @@ static inline int reacquire_kernel_lock(struct task_struct *task) return 0; } -extern void __lockfunc _lock_kernel(void) __acquires(kernel_lock); -extern void __lockfunc _unlock_kernel(void) __releases(kernel_lock); +extern void __lockfunc +_lock_kernel(const char *func, const char *file, int line) +__acquires(kernel_lock); -#define lock_kernel() { \ - trace_lock_kernel(__func__, __FILE__, __LINE__); \ - _lock_kernel(); \ -} +extern void __lockfunc +_unlock_kernel(const char *func, const char *file, int line) +__releases(kernel_lock); -#define unlock_kernel() { \ - trace_unlock_kernel(__func__, __FILE__, __LINE__); \ - _unlock_kernel(); \ -} +#define lock_kernel() do { \ + _lock_kernel(__func__, __FILE__, __LINE__); \ +} while (0) + +#define unlock_kernel() do { \ + _unlock_kernel(__func__, __FILE__, __LINE__); \ +} while (0) /* * Various legacy drivers don't really need the BKL in a specific @@ -52,8 +54,8 @@ static inline void cycle_kernel_lock(void) #else -#define lock_kernel() trace_lock_kernel(__func__, __FILE__, __LINE__); -#define unlock_kernel() trace_unlock_kernel(__func__, __FILE__, __LINE__); +#define lock_kernel() +#define unlock_kernel() #define release_kernel_lock(task) do { } while(0) #define cycle_kernel_lock() do { } while(0) #define reacquire_kernel_lock(task) 0 diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index 5c10b2e1fd08..4ebfa5a164d7 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c @@ -8,9 +8,11 @@ #include #include #include -#define CREATE_TRACE_POINTS #include +#define CREATE_TRACE_POINTS +#include + /* * The 'big kernel lock' * @@ -114,19 +116,24 @@ static inline void __unlock_kernel(void) * This cannot happen asynchronously, so we only need to * worry about other CPU's. */ -void __lockfunc _lock_kernel(void) +void __lockfunc _lock_kernel(const char *func, const char *file, int line) { - int depth = current->lock_depth+1; + int depth = current->lock_depth + 1; + + trace_lock_kernel(func, file, line); + if (likely(!depth)) __lock_kernel(); current->lock_depth = depth; } -void __lockfunc _unlock_kernel(void) +void __lockfunc _unlock_kernel(const char *func, const char *file, int line) { BUG_ON(current->lock_depth < 0); if (likely(--current->lock_depth < 0)) __unlock_kernel(); + + trace_unlock_kernel(func, file, line); } EXPORT_SYMBOL(_lock_kernel); -- cgit v1.2.3 From 3d1d07ecd2009f65cb2091563fa21f9600c36774 Mon Sep 17 00:00:00 2001 From: John Kacur Date: Mon, 28 Sep 2009 15:32:55 +0200 Subject: perf tools: Put common histogram functions in their own file Move histogram-related functions into their own files (hist.c and hist.h) and make use of them in builtin-annotate.c and builtin-report.c.
Signed-off-by: John Kacur Acked-by: Frederic Weisbecker Cc: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 + tools/perf/builtin-annotate.c | 152 +-------------------------------------- tools/perf/builtin-report.c | 164 +----------------------------------------- tools/perf/util/hist.c | 164 ++++++++++++++++++++++++++++++++++++++++++ tools/perf/util/hist.h | 47 ++++++++++++ 5 files changed, 216 insertions(+), 313 deletions(-) create mode 100644 tools/perf/util/hist.c create mode 100644 tools/perf/util/hist.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 0a9e5aede318..3a99a9fda645 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -340,6 +340,7 @@ LIB_H += util/module.h LIB_H += util/color.h LIB_H += util/values.h LIB_H += util/sort.h +LIB_H += util/hist.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -376,6 +377,7 @@ LIB_OBJS += util/trace-event-read.o LIB_OBJS += util/trace-event-info.o LIB_OBJS += util/svghelper.o LIB_OBJS += util/sort.o +LIB_OBJS += util/hist.o BUILTIN_OBJS += builtin-annotate.o BUILTIN_OBJS += builtin-help.o diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 059c565b31ea..df516dce9540 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -23,6 +23,7 @@ #include "util/parse-events.h" #include "util/thread.h" #include "util/sort.h" +#include "util/hist.h" static char const *input_name = "perf.data"; @@ -47,45 +48,6 @@ struct sym_ext { char *path; }; -/* - * histogram, sorted on item, collects counts - */ - -static struct rb_root hist; - -static int64_t -hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - cmp = se->cmp(left, right); - if (cmp) - break; - } - - return cmp; -} - -static int64_t -hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - int64_t (*f)(struct hist_entry *, struct hist_entry *); - - f = se->collapse ?: se->cmp; - - cmp = f(left, right); - if (cmp) - break; - } - - return cmp; -} /* * collect histogram counts @@ -163,116 +125,6 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, return 0; } -static void hist_entry__free(struct hist_entry *he) -{ - free(he); -} - -/* - * collapse the histogram - */ - -static struct rb_root collapse_hists; - -static void collapse__insert_entry(struct hist_entry *he) -{ - struct rb_node **p = &collapse_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - int64_t cmp; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__collapse(iter, he); - - if (!cmp) { - iter->count += he->count; - hist_entry__free(he); - return; - } - - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &collapse_hists); -} - -static void collapse__resort(void) -{ - struct rb_node *next; - struct hist_entry *n; - - if (!sort__need_collapse) - return; - - next = rb_first(&hist); - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, &hist); - collapse__insert_entry(n); - } -} - -/* - * reverse the map, sort on count. 
- */ - -static struct rb_root output_hists; - -static void output__insert_entry(struct hist_entry *he) -{ - struct rb_node **p = &output_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - if (he->count > iter->count) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &output_hists); -} - -static void output__resort(void) -{ - struct rb_node *next; - struct hist_entry *n; - struct rb_root *tree = &hist; - - if (sort__need_collapse) - tree = &collapse_hists; - - next = rb_first(tree); - - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, tree); - output__insert_entry(n); - } -} - -static unsigned long total = 0, - total_mmap = 0, - total_comm = 0, - total_fork = 0, - total_unknown = 0; - static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { @@ -861,7 +713,7 @@ more: dsos__fprintf(stdout); collapse__resort(); - output__resort(); + output__resort(total); find_annotations(); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 7b43504900ff..c1a54fc8527a 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -28,6 +28,7 @@ #include "util/thread.h" #include "util/sort.h" +#include "util/hist.h" static char const *input_name = "perf.data"; @@ -55,8 +56,6 @@ static int exclude_other = 1; static char callchain_default_opt[] = "fractal,0.5"; -static int callchain; - static char __cwd[PATH_MAX]; static char *cwd = __cwd; static int cwdlen; @@ -66,50 +65,8 @@ static struct thread *last_match; static struct perf_header *header; -static -struct callchain_param callchain_param = { - .mode = CHAIN_GRAPH_REL, - .min_percent = 0.5 -}; - static u64 sample_type; -static struct rb_root hist; - -static int64_t -hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - cmp = se->cmp(left, right); - if (cmp) - break; - } - - return cmp; -} - -static int64_t -hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - int64_t (*f)(struct hist_entry *, struct hist_entry *); - - f = se->collapse ?: se->cmp; - - cmp = f(left, right); - if (cmp) - break; - } - - return cmp; -} - static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask) { int i; @@ -308,7 +265,6 @@ hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, return ret; } - static size_t hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples) { @@ -573,117 +529,6 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, return 0; } -static void hist_entry__free(struct hist_entry *he) -{ - free(he); -} - -/* - * collapse the histogram - */ - -static struct rb_root collapse_hists; - -static void collapse__insert_entry(struct hist_entry *he) -{ - struct rb_node **p = &collapse_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - int64_t cmp; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__collapse(iter, he); - - if (!cmp) { - iter->count += he->count; - hist_entry__free(he); - return; - } - - if (cmp < 0) - p = 
&(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &collapse_hists); -} - -static void collapse__resort(void) -{ - struct rb_node *next; - struct hist_entry *n; - - if (!sort__need_collapse) - return; - - next = rb_first(&hist); - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, &hist); - collapse__insert_entry(n); - } -} - -/* - * reverse the map, sort on count. - */ - -static struct rb_root output_hists; - -static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits) -{ - struct rb_node **p = &output_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - - if (callchain) - callchain_param.sort(&he->sorted_chain, &he->callchain, - min_callchain_hits, &callchain_param); - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - if (he->count > iter->count) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &output_hists); -} - -static void output__resort(u64 total_samples) -{ - struct rb_node *next; - struct hist_entry *n; - struct rb_root *tree = &hist; - u64 min_callchain_hits; - - min_callchain_hits = total_samples * (callchain_param.min_percent / 100); - - if (sort__need_collapse) - tree = &collapse_hists; - - next = rb_first(tree); - - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, tree); - output__insert_entry(n, min_callchain_hits); - } -} - static size_t output__fprintf(FILE *fp, u64 total_samples) { struct hist_entry *pos; @@ -778,13 +623,6 @@ print_entries: return ret; } -static unsigned long total = 0, - total_mmap = 0, - total_comm = 0, - total_fork = 0, - total_unknown = 0, - total_lost = 0; - static int validate_chain(struct ip_callchain *chain, event_t *event) { unsigned int chain_size; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c new file mode 100644 index 000000000000..82808dc4f8e3 --- /dev/null +++ b/tools/perf/util/hist.c @@ -0,0 +1,164 @@ +#include "hist.h" + +struct rb_root hist; +struct rb_root collapse_hists; +struct rb_root output_hists; +int callchain; + +struct callchain_param callchain_param = { + .mode = CHAIN_GRAPH_REL, + .min_percent = 0.5 +}; + +unsigned long total; +unsigned long total_mmap; +unsigned long total_comm; +unsigned long total_fork; +unsigned long total_unknown; +unsigned long total_lost; + +/* + * histogram, sorted on item, collects counts + */ + +int64_t +hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + cmp = se->cmp(left, right); + if (cmp) + break; + } + + return cmp; +} + +int64_t +hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + int64_t (*f)(struct hist_entry *, struct hist_entry *); + + f = se->collapse ?: se->cmp; + + cmp = f(left, right); + if (cmp) + break; + } + + return cmp; +} + +void hist_entry__free(struct hist_entry *he) +{ + free(he); +} + +/* + * collapse the histogram + */ + +void collapse__insert_entry(struct hist_entry *he) +{ + struct rb_node **p = &collapse_hists.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + int64_t cmp; + + while (*p != NULL) { + parent 
= *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__collapse(iter, he); + + if (!cmp) { + iter->count += he->count; + hist_entry__free(he); + return; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &collapse_hists); +} + +void collapse__resort(void) +{ + struct rb_node *next; + struct hist_entry *n; + + if (!sort__need_collapse) + return; + + next = rb_first(&hist); + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, &hist); + collapse__insert_entry(n); + } +} + +/* + * reverse the map, sort on count. + */ + +void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits) +{ + struct rb_node **p = &output_hists.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + + if (callchain) + callchain_param.sort(&he->sorted_chain, &he->callchain, + min_callchain_hits, &callchain_param); + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + if (he->count > iter->count) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &output_hists); +} + +void output__resort(u64 total_samples) +{ + struct rb_node *next; + struct hist_entry *n; + struct rb_root *tree = &hist; + u64 min_callchain_hits; + + min_callchain_hits = + total_samples * (callchain_param.min_percent / 100); + + if (sort__need_collapse) + tree = &collapse_hists; + + next = rb_first(tree); + + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, tree); + output__insert_entry(n, min_callchain_hits); + } +} diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h new file mode 100644 index 000000000000..9a8daa12b43a --- /dev/null +++ b/tools/perf/util/hist.h @@ -0,0 +1,47 @@ +#ifndef __PERF_HIST_H +#define __PERF_HIST_H +#include "../builtin.h" + +#include "util.h" + +#include "color.h" +#include +#include "cache.h" +#include +#include "symbol.h" +#include "string.h" +#include "callchain.h" +#include "strlist.h" +#include "values.h" + +#include "../perf.h" +#include "debug.h" +#include "header.h" + +#include "parse-options.h" +#include "parse-events.h" + +#include "thread.h" +#include "sort.h" + +extern struct rb_root hist; +extern struct rb_root collapse_hists; +extern struct rb_root output_hists; +extern int callchain; +extern struct callchain_param callchain_param; +extern unsigned long total; +extern unsigned long total_mmap; +extern unsigned long total_comm; +extern unsigned long total_fork; +extern unsigned long total_unknown; +extern unsigned long total_lost; + +extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); +extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); +extern void hist_entry__free(struct hist_entry *); +extern void collapse__insert_entry(struct hist_entry *); +extern void collapse__resort(void); +extern void output__insert_entry(struct hist_entry *, u64); +extern void output__resort(u64); + +#endif /* __PERF_HIST_H */ -- cgit v1.2.3 From 1b46cddfccfec4cc67b187fb53d78198de6a057c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 28 Sep 2009 14:48:46 -0300 Subject: perf tools: Use rb_tree for maps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Threads can have many and kernel modules will be 
represented as a tree of maps as well. Ah, and for a perf.data with 146607 samples: Before: [root@doppio ~]# perf stat -r 5 perf report > /dev/null Performance counter stats for 'perf report' (5 runs): 699.823680 task-clock-msecs # 0.991 CPUs ( +- 0.454% ) 74 context-switches # 0.000 M/sec ( +- 1.709% ) 2 CPU-migrations # 0.000 M/sec ( +- 17.008% ) 23114 page-faults # 0.033 M/sec ( +- 0.000% ) 1381257019 cycles # 1973.721 M/sec ( +- 0.290% ) 1456894438 instructions # 1.055 IPC ( +- 0.007% ) 18779818 cache-references # 26.835 M/sec ( +- 0.380% ) 641799 cache-misses # 0.917 M/sec ( +- 1.200% ) 0.705972729 seconds time elapsed ( +- 0.501% ) [root@doppio ~]# After Performance counter stats for 'perf report' (5 runs): 691.261451 task-clock-msecs # 0.993 CPUs ( +- 0.307% ) 72 context-switches # 0.000 M/sec ( +- 0.829% ) 6 CPU-migrations # 0.000 M/sec ( +- 18.409% ) 23127 page-faults # 0.033 M/sec ( +- 0.000% ) 1366395876 cycles # 1976.670 M/sec ( +- 0.153% ) 1443136016 instructions # 1.056 IPC ( +- 0.012% ) 17956402 cache-references # 25.976 M/sec ( +- 0.325% ) 661924 cache-misses # 0.958 M/sec ( +- 1.335% ) 0.696127275 seconds time elapsed ( +- 0.377% ) I.e. we see some speedup too. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith Cc: "H. Peter Anvin" LKML-Reference: <20090928174846.GA3361@ghostprotocols.net> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 + tools/perf/util/event.h | 4 +- tools/perf/util/thread.c | 129 ++++++++++++++++++++++++++++++----------------- tools/perf/util/thread.h | 12 +++-- 4 files changed, 95 insertions(+), 51 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 3a99a9fda645..055290a5b835 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -341,6 +341,7 @@ LIB_H += util/color.h LIB_H += util/values.h LIB_H += util/sort.h LIB_H += util/hist.h +LIB_H += util/thread.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index c31a5da6458b..4c69eb553807 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -3,7 +3,7 @@ #include "../perf.h" #include "util.h" -#include +#include enum { SHOW_KERNEL = 1, @@ -79,7 +79,7 @@ typedef union event_union { } event_t; struct map { - struct list_head node; + struct rb_node rb_node; u64 start; u64 end; u64 pgoff; diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 45efb5db0d19..9d0945cc66d1 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -15,7 +15,7 @@ static struct thread *thread__new(pid_t pid) self->comm = malloc(32); if (self->comm) snprintf(self->comm, 32, ":%d", self->pid); - INIT_LIST_HEAD(&self->maps); + self->maps = RB_ROOT; } return self; @@ -31,11 +31,13 @@ int thread__set_comm(struct thread *self, const char *comm) static size_t thread__fprintf(struct thread *self, FILE *fp) { - struct map *pos; + struct rb_node *nd; size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm); - list_for_each_entry(pos, &self->maps, node) + for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) { + struct map *pos = rb_entry(nd, struct map, rb_node); ret += map__fprintf(pos, fp); + } return ret; } @@ -93,42 +95,90 @@ register_idle_thread(struct rb_root *threads, struct thread **last_match) return thread; } -void thread__insert_map(struct thread *self, struct map *map) +static void thread__remove_overlappings(struct thread *self, struct map *map) { - struct map *pos, *tmp; - - list_for_each_entry_safe(pos, tmp, &self->maps, 
node) { - if (map__overlap(pos, map)) { - if (verbose >= 2) { - printf("overlapping maps:\n"); - map__fprintf(map, stdout); - map__fprintf(pos, stdout); - } - - if (map->start <= pos->start && map->end > pos->start) - pos->start = map->end; - - if (map->end >= pos->end && map->start < pos->end) - pos->end = map->start; - - if (verbose >= 2) { - printf("after collision:\n"); - map__fprintf(pos, stdout); - } - - if (pos->start >= pos->end) { - list_del_init(&pos->node); - free(pos); - } + struct rb_node *next = rb_first(&self->maps); + + while (next) { + struct map *pos = rb_entry(next, struct map, rb_node); + next = rb_next(&pos->rb_node); + + if (!map__overlap(pos, map)) + continue; + + if (verbose >= 2) { + printf("overlapping maps:\n"); + map__fprintf(map, stdout); + map__fprintf(pos, stdout); + } + + if (map->start <= pos->start && map->end > pos->start) + pos->start = map->end; + + if (map->end >= pos->end && map->start < pos->end) + pos->end = map->start; + + if (verbose >= 2) { + printf("after collision:\n"); + map__fprintf(pos, stdout); + } + + if (pos->start >= pos->end) { + rb_erase(&pos->rb_node, &self->maps); + free(pos); } } +} + +void maps__insert(struct rb_root *maps, struct map *map) +{ + struct rb_node **p = &maps->rb_node; + struct rb_node *parent = NULL; + const u64 ip = map->start; + struct map *m; + + while (*p != NULL) { + parent = *p; + m = rb_entry(parent, struct map, rb_node); + if (ip < m->start) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&map->rb_node, parent, p); + rb_insert_color(&map->rb_node, maps); +} + +struct map *maps__find(struct rb_root *maps, u64 ip) +{ + struct rb_node **p = &maps->rb_node; + struct rb_node *parent = NULL; + struct map *m; + + while (*p != NULL) { + parent = *p; + m = rb_entry(parent, struct map, rb_node); + if (ip < m->start) + p = &(*p)->rb_left; + else if (ip > m->end) + p = &(*p)->rb_right; + else + return m; + } + + return NULL; +} - list_add_tail(&map->node, &self->maps); +void thread__insert_map(struct thread *self, struct map *map) +{ + thread__remove_overlappings(self, map); + maps__insert(&self->maps, map); } int thread__fork(struct thread *self, struct thread *parent) { - struct map *map; + struct rb_node *nd; if (self->comm) free(self->comm); @@ -136,7 +186,8 @@ int thread__fork(struct thread *self, struct thread *parent) if (!self->comm) return -ENOMEM; - list_for_each_entry(map, &parent->maps, node) { + for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) { + struct map *map = rb_entry(nd, struct map, rb_node); struct map *new = map__clone(map); if (!new) return -ENOMEM; @@ -146,20 +197,6 @@ int thread__fork(struct thread *self, struct thread *parent) return 0; } -struct map *thread__find_map(struct thread *self, u64 ip) -{ - struct map *pos; - - if (self == NULL) - return NULL; - - list_for_each_entry(pos, &self->maps, node) - if (ip >= pos->start && ip <= pos->end) - return pos; - - return NULL; -} - size_t threads__fprintf(FILE *fp, struct rb_root *threads) { size_t ret = 0; diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 693ed1ea10b4..bbb37c1a52ee 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -2,13 +2,12 @@ #define __PERF_THREAD_H #include -#include #include #include "symbol.h" struct thread { struct rb_node rb_node; - struct list_head maps; + struct rb_root maps; pid_t pid; char shortname[3]; char *comm; @@ -21,7 +20,14 @@ struct thread * register_idle_thread(struct rb_root *threads, struct thread **last_match); void 
thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); -struct map *thread__find_map(struct thread *self, u64 ip); size_t threads__fprintf(FILE *fp, struct rb_root *threads); +void maps__insert(struct rb_root *maps, struct map *map); +struct map *maps__find(struct rb_root *maps, u64 ip); + +static inline struct map *thread__find_map(struct thread *self, u64 ip) +{ + return self ? maps__find(&self->maps, ip) : NULL; +} + #endif /* __PERF_THREAD_H */ -- cgit v1.2.3 From a80deb622dba7dfb65d9e27b6b74b7c1963c3635 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 28 Sep 2009 15:23:51 -0300 Subject: perf sched: Remove dead code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Several variables are not used at all, cut'n'paste leftovers. Also check if the sample_type is RAW earlier, to avoid needless searches. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: "H. Peter Anvin" Cc: Peter Zijlstra Cc: Mike Galbraith Signed-off-by: Ingo Molnar --- tools/perf/builtin-sched.c | 34 ++++------------------------------ 1 file changed, 4 insertions(+), 30 deletions(-) diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index ea9c15c0cdfe..4470f2535706 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1544,16 +1544,15 @@ process_raw_event(event_t *raw_event __used, void *more_data, static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { - char level; - int show = 0; - struct dso *dso = NULL; struct thread *thread; u64 ip = event->ip.ip; u64 timestamp = -1; u32 cpu = -1; u64 period = 1; void *more_data = event->ip.__more_data; - int cpumode; + + if (!(sample_type & PERF_SAMPLE_RAW)) + return 0; thread = threads__findnew(event->ip.pid, &threads, &last_match); @@ -1589,32 +1588,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return -1; } - cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - - if (cpumode == PERF_RECORD_MISC_KERNEL) { - show = SHOW_KERNEL; - level = 'k'; - - dso = kernel_dso; - - dump_printf(" ...... dso: %s\n", dso->name); - - } else if (cpumode == PERF_RECORD_MISC_USER) { - - show = SHOW_USER; - level = '.'; - - } else { - show = SHOW_HV; - level = 'H'; - - dso = hypervisor_dso; - - dump_printf(" ...... dso: [hypervisor]\n"); - } - - if (sample_type & PERF_SAMPLE_RAW) - process_raw_event(event, more_data, cpu, timestamp, thread); + process_raw_event(event, more_data, cpu, timestamp, thread); return 0; } -- cgit v1.2.3 From cad3071424edd7854f63aa80d09473e84f49ed79 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 28 Sep 2009 17:08:18 -0300 Subject: perf trace: Remove dead code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Several variables are not used at all, cut'n'paste leftovers. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith Cc: "H. 
Peter Anvin" LKML-Reference: <20090928200818.GF3361@ghostprotocols.net> Signed-off-by: Ingo Molnar --- tools/perf/builtin-trace.c | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index e9d256e2f47d..2f938887335a 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -53,16 +53,12 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { - char level; - int show = 0; - struct dso *dso = NULL; struct thread *thread; u64 ip = event->ip.ip; u64 timestamp = -1; u32 cpu = -1; u64 period = 1; void *more_data = event->ip.__more_data; - int cpumode; thread = threads__findnew(event->ip.pid, &threads, &last_match); @@ -98,30 +94,6 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return -1; } - cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - - if (cpumode == PERF_RECORD_MISC_KERNEL) { - show = SHOW_KERNEL; - level = 'k'; - - dso = kernel_dso; - - dump_printf(" ...... dso: %s\n", dso->name); - - } else if (cpumode == PERF_RECORD_MISC_USER) { - - show = SHOW_USER; - level = '.'; - - } else { - show = SHOW_HV; - level = 'H'; - - dso = hypervisor_dso; - - dump_printf(" ...... dso: [hypervisor]\n"); - } - if (sample_type & PERF_SAMPLE_RAW) { struct { u32 size; -- cgit v1.2.3 From 2ccdc450e658053681202d42ac64b3638f22dc1a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 24 Sep 2009 14:24:00 -0700 Subject: perf top: Remove dead {min,max}_ip unused variables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: "H. Peter Anvin" Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: <20090924212400.GA15321@ghostprotocols.net> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 1ca88896eee4..bf464ce7e3e2 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -96,9 +96,6 @@ static int display_weighted = -1; * Symbols */ -static u64 min_ip; -static u64 max_ip = -1ll; - struct sym_entry { struct rb_node rb_node; struct list_head node; @@ -826,8 +823,6 @@ static int symbol_filter(struct dso *self, struct symbol *sym) static int parse_symbols(void) { - struct rb_node *node; - struct symbol *sym; int use_modules = vmlinux_name ? 1 : 0; kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry)); @@ -837,14 +832,6 @@ static int parse_symbols(void) if (dso__load_kernel(kernel_dso, vmlinux_name, symbol_filter, verbose, use_modules) <= 0) goto out_delete_dso; - node = rb_first(&kernel_dso->syms); - sym = rb_entry(node, struct symbol, rb_node); - min_ip = sym->start; - - node = rb_last(&kernel_dso->syms); - sym = rb_entry(node, struct symbol, rb_node); - max_ip = sym->end; - if (dump_symtab) dso__fprintf(kernel_dso, stderr); -- cgit v1.2.3 From 439d473b4777de510e1322168ac6f2f377ecd5bc Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 2 Oct 2009 03:29:58 -0300 Subject: perf tools: Rewrite and improve support for kernel modules MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Representing modules as struct map entries, backed by a DSO, etc, using /proc/modules to find where the module is loaded. 
DSOs now can have a short and long name, so that in verbose mode we can show exactly which .ko or vmlinux image was used. As kernel modules now are a DSO separate from the kernel, we can ask for just the hits for a particular set of kernel modules, just like we can do with shared libraries: [root@doppio linux-2.6-tip]# perf report -n --vmlinux /home/acme/git/build/tip-recvmmsg/vmlinux --modules --dsos \[drm\] | head -15 84.58% 13266 Xorg [k] drm_clflush_pages 4.02% 630 Xorg [k] trace_kmalloc.clone.0 3.95% 619 Xorg [k] drm_ioctl 2.07% 324 Xorg [k] drm_addbufs 1.68% 263 Xorg [k] drm_gem_close_ioctl 0.77% 120 Xorg [k] drm_setmaster_ioctl 0.70% 110 Xorg [k] drm_lastclose 0.68% 106 Xorg [k] drm_open 0.54% 85 Xorg [k] drm_mm_search_free [root@doppio linux-2.6-tip]# Specifying --dsos /lib/modules/2.6.31-tip/kernel/drivers/gpu/drm/drm.ko would have the same effect. Allowing specifying just 'drm.ko' is left for another patch. Processing kallsyms so that per kernel module struct map are instantiated was also left for another patch. That will allow removing the module name from each of its symbols. struct symbol was reduced by removing the ->module backpointer and moving it (well now the map) to struct symbol_entry in perf top, that is its only user right now. The total linecount went down by ~500 lines. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: "H. Peter Anvin" Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Avi Kivity Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 - tools/perf/builtin-annotate.c | 73 +++--- tools/perf/builtin-report.c | 79 +++--- tools/perf/builtin-top.c | 74 ++---- tools/perf/util/event.h | 6 +- tools/perf/util/module.c | 545 ------------------------------------------ tools/perf/util/module.h | 53 ---- tools/perf/util/sort.c | 38 +-- tools/perf/util/sort.h | 7 +- tools/perf/util/symbol.c | 447 +++++++++++++++++++++++----------- tools/perf/util/symbol.h | 20 +- tools/perf/util/thread.c | 34 +-- tools/perf/util/thread.h | 4 + 13 files changed, 453 insertions(+), 929 deletions(-) delete mode 100644 tools/perf/util/module.c delete mode 100644 tools/perf/util/module.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 055290a5b835..8e7509f2d882 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -336,7 +336,6 @@ LIB_H += util/strlist.h LIB_H += util/run-command.h LIB_H += util/sigchain.h LIB_H += util/symbol.h -LIB_H += util/module.h LIB_H += util/color.h LIB_H += util/values.h LIB_H += util/sort.h @@ -364,7 +363,6 @@ LIB_OBJS += util/usage.o LIB_OBJS += util/wrapper.o LIB_OBJS += util/sigchain.o LIB_OBJS += util/symbol.o -LIB_OBJS += util/module.o LIB_OBJS += util/color.o LIB_OBJS += util/pager.o LIB_OBJS += util/header.o diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index df516dce9540..7d5a3b1bcda9 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -63,6 +63,7 @@ static void hist_hit(struct hist_entry *he, u64 ip) return; sym_size = sym->end - sym->start; + ip = he->map->map_ip(he->map, ip); offset = ip - sym->start; if (offset >= sym_size) @@ -80,7 +81,7 @@ static void hist_hit(struct hist_entry *he, u64 ip) } static int -hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, +hist_entry__add(struct thread *thread, struct map *map, struct symbol *sym, u64 ip, char level) { struct rb_node **p = &hist.rb_node; @@ -89,7 +90,6 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, struct hist_entry entry = { .thread = thread, .map = map, - 
.dso = dso, .sym = sym, .ip = ip, .level = level, @@ -130,10 +130,10 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; int show = 0; - struct dso *dso = NULL; struct thread *thread; u64 ip = event->ip.ip; struct map *map = NULL; + struct symbol *sym = NULL; thread = threads__findnew(event->ip.pid, &threads, &last_match); @@ -155,32 +155,35 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) if (event->header.misc & PERF_RECORD_MISC_KERNEL) { show = SHOW_KERNEL; level = 'k'; - - dso = kernel_dso; - - dump_printf(" ...... dso: %s\n", dso->name); - + sym = kernel_maps__find_symbol(ip, &map); + dump_printf(" ...... dso: %s\n", + map ? map->dso->long_name : ""); } else if (event->header.misc & PERF_RECORD_MISC_USER) { - show = SHOW_USER; level = '.'; - map = thread__find_map(thread, ip); if (map != NULL) { +got_map: ip = map->map_ip(map, ip); - dso = map->dso; + sym = map->dso->find_symbol(map->dso, ip); } else { /* * If this is outside of all known maps, * and is a negative address, try to look it * up in the kernel dso, as it might be a - * vsyscall (which executes in user-mode): + * vsyscall or vdso (which executes in user-mode). + * + * XXX This is nasty, we should have a symbol list in + * the "[vdso]" dso, but for now lets use the old + * trick of looking in the whole kernel symbol list. */ - if ((long long)ip < 0) - dso = kernel_dso; + if ((long long)ip < 0) { + map = kernel_map; + goto got_map; + } } - dump_printf(" ...... dso: %s\n", dso ? dso->name : ""); - + dump_printf(" ...... dso: %s\n", + map ? map->dso->long_name : ""); } else { show = SHOW_HV; level = 'H'; @@ -188,12 +191,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) } if (show & show_mask) { - struct symbol *sym = NULL; - - if (dso) - sym = dso->find_symbol(dso, ip); - - if (hist_entry__add(thread, map, dso, sym, ip, level)) { + if (hist_entry__add(thread, map, sym, ip, level)) { fprintf(stderr, "problem incrementing symbol count, skipping event\n"); return -1; @@ -313,7 +311,7 @@ process_event(event_t *event, unsigned long offset, unsigned long head) } static int -parse_line(FILE *file, struct symbol *sym, u64 start, u64 len) +parse_line(FILE *file, struct symbol *sym, u64 len) { char *line = NULL, *tmp, *tmp2; static const char *prev_line; @@ -363,7 +361,7 @@ parse_line(FILE *file, struct symbol *sym, u64 start, u64 len) const char *color; struct sym_ext *sym_ext = sym->priv; - offset = line_ip - start; + offset = line_ip - sym->start; if (offset < len) hits = sym->hist[offset]; @@ -442,7 +440,7 @@ static void free_source_line(struct symbol *sym, int len) /* Get the filename:line for the colored entries */ static void -get_source_line(struct symbol *sym, u64 start, int len, const char *filename) +get_source_line(struct symbol *sym, int len, const char *filename) { int i; char cmd[PATH_MAX * 2]; @@ -467,7 +465,7 @@ get_source_line(struct symbol *sym, u64 start, int len, const char *filename) if (sym_ext[i].percent <= 0.5) continue; - offset = start + i; + offset = sym->start + i; sprintf(cmd, "addr2line -e %s %016llx", filename, offset); fp = popen(cmd, "r"); if (!fp) @@ -519,31 +517,23 @@ static void print_summary(const char *filename) static void annotate_sym(struct dso *dso, struct symbol *sym) { - const char *filename = dso->name, *d_filename; - u64 start, end, len; + const char *filename = dso->long_name, *d_filename; + u64 len; char command[PATH_MAX*2]; FILE *file; if (!filename) return; - if (sym->module) - 
filename = sym->module->path; - else if (dso == kernel_dso) - filename = vmlinux_name; - - start = sym->obj_start; - if (!start) - start = sym->start; + if (full_paths) d_filename = filename; else d_filename = basename(filename); - end = start + sym->end - sym->start + 1; len = sym->end - sym->start; if (print_line) { - get_source_line(sym, start, len, filename); + get_source_line(sym, len, filename); print_summary(filename); } @@ -552,10 +542,11 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) printf("------------------------------------------------\n"); if (verbose >= 2) - printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name); + printf("annotating [%p] %30s : [%p] %30s\n", + dso, dso->long_name, sym, sym->name); sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s", - (u64)start, (u64)end, filename, filename); + sym->start, sym->end, filename, filename); if (verbose >= 3) printf("doing: %s\n", command); @@ -565,7 +556,7 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) return; while (!feof(file)) { - if (parse_line(file, sym, start, len) < 0) + if (parse_line(file, sym, len) < 0) break; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index c1a54fc8527a..3ed3baf96ffb 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -349,22 +349,17 @@ static int thread__set_comm_adjust(struct thread *self, const char *comm) static struct symbol * -resolve_symbol(struct thread *thread, struct map **mapp, - struct dso **dsop, u64 *ipp) +resolve_symbol(struct thread *thread, struct map **mapp, u64 *ipp) { - struct dso *dso = dsop ? *dsop : NULL; struct map *map = mapp ? *mapp : NULL; u64 ip = *ipp; - if (!thread) - return NULL; - - if (dso) - goto got_dso; - if (map) goto got_map; + if (!thread) + return NULL; + map = thread__find_map(thread, ip); if (map != NULL) { /* @@ -379,29 +374,29 @@ resolve_symbol(struct thread *thread, struct map **mapp, *mapp = map; got_map: ip = map->map_ip(map, ip); - - dso = map->dso; } else { /* * If this is outside of all known maps, * and is a negative address, try to look it * up in the kernel dso, as it might be a - * vsyscall (which executes in user-mode): + * vsyscall or vdso (which executes in user-mode). + * + * XXX This is nasty, we should have a symbol list in + * the "[vdso]" dso, but for now lets use the old + * trick of looking in the whole kernel symbol list. */ - if ((long long)ip < 0) - dso = kernel_dso; + if ((long long)ip < 0) { + map = kernel_map; + if (mapp) + *mapp = map; + } } - dump_printf(" ...... dso: %s\n", dso ? dso->name : ""); + dump_printf(" ...... dso: %s\n", + map ? map->dso->long_name : ""); dump_printf(" ...... map: %Lx -> %Lx\n", *ipp, ip); *ipp = ip; - if (dsop) - *dsop = dso; - - if (!dso) - return NULL; -got_dso: - return dso->find_symbol(dso, ip); + return map ? 
map->dso->find_symbol(map->dso, ip) : NULL; } static int call__match(struct symbol *sym) @@ -413,7 +408,7 @@ static int call__match(struct symbol *sym) } static struct symbol ** -resolve_callchain(struct thread *thread, struct map *map __used, +resolve_callchain(struct thread *thread, struct map *map, struct ip_callchain *chain, struct hist_entry *entry) { u64 context = PERF_CONTEXT_MAX; @@ -430,8 +425,7 @@ resolve_callchain(struct thread *thread, struct map *map __used, for (i = 0; i < chain->nr; i++) { u64 ip = chain->ips[i]; - struct dso *dso = NULL; - struct symbol *sym; + struct symbol *sym = NULL; if (ip >= PERF_CONTEXT_MAX) { context = ip; @@ -440,17 +434,15 @@ resolve_callchain(struct thread *thread, struct map *map __used, switch (context) { case PERF_CONTEXT_HV: - dso = hypervisor_dso; break; case PERF_CONTEXT_KERNEL: - dso = kernel_dso; + sym = kernel_maps__find_symbol(ip, &map); break; default: + sym = resolve_symbol(thread, &map, &ip); break; } - sym = resolve_symbol(thread, NULL, &dso, &ip); - if (sym) { if (sort__has_parent && call__match(sym) && !entry->parent) @@ -469,7 +461,7 @@ resolve_callchain(struct thread *thread, struct map *map __used, */ static int -hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, +hist_entry__add(struct thread *thread, struct map *map, struct symbol *sym, u64 ip, struct ip_callchain *chain, char level, u64 count) { @@ -480,7 +472,6 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, struct hist_entry entry = { .thread = thread, .map = map, - .dso = dso, .sym = sym, .ip = ip, .level = level, @@ -641,7 +632,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; int show = 0; - struct dso *dso = NULL; + struct symbol *sym = NULL; struct thread *thread; u64 ip = event->ip.ip; u64 period = 1; @@ -700,35 +691,35 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) show = SHOW_KERNEL; level = 'k'; - dso = kernel_dso; - - dump_printf(" ...... dso: %s\n", dso->name); - + sym = kernel_maps__find_symbol(ip, &map); + dump_printf(" ...... dso: %s\n", + map ? map->dso->long_name : ""); } else if (cpumode == PERF_RECORD_MISC_USER) { show = SHOW_USER; level = '.'; + sym = resolve_symbol(thread, &map, &ip); } else { show = SHOW_HV; level = 'H'; - dso = hypervisor_dso; - dump_printf(" ...... 
dso: [hypervisor]\n"); } if (show & show_mask) { - struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip); - - if (dso_list && (!dso || !dso->name || - !strlist__has_entry(dso_list, dso->name))) + if (dso_list && + (!map || !map->dso || + !(strlist__has_entry(dso_list, map->dso->short_name) || + (map->dso->short_name != map->dso->long_name && + strlist__has_entry(dso_list, map->dso->long_name))))) return 0; - if (sym_list && (!sym || !strlist__has_entry(sym_list, sym->name))) + if (sym_list && sym && !strlist__has_entry(sym_list, sym->name)) return 0; - if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) { + if (hist_entry__add(thread, map, sym, ip, + chain, level, period)) { eprintf("problem incrementing symbol count, skipping event\n"); return -1; } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index bf464ce7e3e2..befef842757e 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -22,6 +22,7 @@ #include "util/symbol.h" #include "util/color.h" +#include "util/thread.h" #include "util/util.h" #include #include "util/parse-options.h" @@ -103,6 +104,7 @@ struct sym_entry { unsigned long snap_count; double weight; int skip; + struct map *map; struct source_line *source; struct source_line *lines; struct source_line **lines_tail; @@ -116,12 +118,11 @@ struct sym_entry { static void parse_source(struct sym_entry *syme) { struct symbol *sym; - struct module *module; - struct section *section = NULL; + struct map *map; FILE *file; char command[PATH_MAX*2]; - const char *path = vmlinux_name; - u64 start, end, len; + const char *path; + u64 len; if (!syme) return; @@ -132,27 +133,15 @@ static void parse_source(struct sym_entry *syme) } sym = (struct symbol *)(syme + 1); - module = sym->module; + map = syme->map; + path = map->dso->long_name; - if (module) - path = module->path; - if (!path) - return; - - start = sym->obj_start; - if (!start) - start = sym->start; - - if (module) { - section = module->sections->find_section(module->sections, ".text"); - if (section) - start -= section->vma; - } - - end = start + sym->end - sym->start + 1; len = sym->end - sym->start; - sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", start, end, path); + sprintf(command, + "objdump --start-address=0x%016Lx " + "--stop-address=0x%016Lx -dS %s", + sym->start, sym->end, path); file = popen(command, "r"); if (!file) @@ -184,13 +173,11 @@ static void parse_source(struct sym_entry *syme) if (strlen(src->line)>8 && src->line[8] == ':') { src->eip = strtoull(src->line, NULL, 16); - if (section) - src->eip += section->vma; + src->eip += map->start; } if (strlen(src->line)>8 && src->line[16] == ':') { src->eip = strtoull(src->line, NULL, 16); - if (section) - src->eip += section->vma; + src->eip += map->start; } } pclose(file); @@ -242,16 +229,9 @@ static void lookup_sym_source(struct sym_entry *syme) struct symbol *symbol = (struct symbol *)(syme + 1); struct source_line *line; char pattern[PATH_MAX]; - char *idx; sprintf(pattern, "<%s>:", symbol->name); - if (symbol->module) { - idx = strstr(pattern, "\t"); - if (idx) - *idx = 0; - } - pthread_mutex_lock(&syme->source_lock); for (line = syme->lines; line; line = line->next) { if (strstr(line->line, pattern)) { @@ -513,8 +493,8 @@ static void print_sym_table(void) if (verbose) printf(" - %016llx", sym->start); printf(" : %s", sym->name); - if (sym->module) - printf("\t[%s]", sym->module->name); + if (syme->map->dso->name[0] == '[') + printf(" \t%s", syme->map->dso->name); 
printf("\n"); } } @@ -784,7 +764,7 @@ static const char *skip_symbols[] = { NULL }; -static int symbol_filter(struct dso *self, struct symbol *sym) +static int symbol_filter(struct map *map, struct symbol *sym) { struct sym_entry *syme; const char *name = sym->name; @@ -806,7 +786,8 @@ static int symbol_filter(struct dso *self, struct symbol *sym) strstr(name, "_text_end")) return 1; - syme = dso__sym_priv(self, sym); + syme = dso__sym_priv(map->dso, sym); + syme->map = map; pthread_mutex_init(&syme->source_lock, NULL); if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) sym_filter_entry = syme; @@ -825,22 +806,14 @@ static int parse_symbols(void) { int use_modules = vmlinux_name ? 1 : 0; - kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry)); - if (kernel_dso == NULL) + if (dsos__load_kernel(vmlinux_name, sizeof(struct sym_entry), + symbol_filter, verbose, use_modules) <= 0) return -1; - if (dso__load_kernel(kernel_dso, vmlinux_name, symbol_filter, verbose, use_modules) <= 0) - goto out_delete_dso; - if (dump_symtab) - dso__fprintf(kernel_dso, stderr); + dsos__fprintf(stderr); return 0; - -out_delete_dso: - dso__delete(kernel_dso); - kernel_dso = NULL; - return -1; } /* @@ -848,10 +821,11 @@ out_delete_dso: */ static void record_ip(u64 ip, int counter) { - struct symbol *sym = dso__find_symbol(kernel_dso, ip); + struct map *map; + struct symbol *sym = kernel_maps__find_symbol(ip, &map); if (sym != NULL) { - struct sym_entry *syme = dso__sym_priv(kernel_dso, sym); + struct sym_entry *syme = dso__sym_priv(map->dso, sym); if (!syme->skip) { syme->count[counter]++; diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 4c69eb553807..a39520e6ae8f 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -3,6 +3,7 @@ #include "../perf.h" #include "util.h" +#include #include enum { @@ -79,7 +80,10 @@ typedef union event_union { } event_t; struct map { - struct rb_node rb_node; + union { + struct rb_node rb_node; + struct list_head node; + }; u64 start; u64 end; u64 pgoff; diff --git a/tools/perf/util/module.c b/tools/perf/util/module.c deleted file mode 100644 index 0d8c85defcd2..000000000000 --- a/tools/perf/util/module.c +++ /dev/null @@ -1,545 +0,0 @@ -#include "util.h" -#include "../perf.h" -#include "string.h" -#include "module.h" - -#include -#include -#include -#include -#include -#include - -static unsigned int crc32(const char *p, unsigned int len) -{ - int i; - unsigned int crc = 0; - - while (len--) { - crc ^= *p++; - for (i = 0; i < 8; i++) - crc = (crc >> 1) ^ ((crc & 1) ? 
0xedb88320 : 0); - } - return crc; -} - -/* module section methods */ - -struct sec_dso *sec_dso__new_dso(const char *name) -{ - struct sec_dso *self = malloc(sizeof(*self) + strlen(name) + 1); - - if (self != NULL) { - strcpy(self->name, name); - self->secs = RB_ROOT; - self->find_section = sec_dso__find_section; - } - - return self; -} - -static void sec_dso__delete_section(struct section *self) -{ - free(((void *)self)); -} - -void sec_dso__delete_sections(struct sec_dso *self) -{ - struct section *pos; - struct rb_node *next = rb_first(&self->secs); - - while (next) { - pos = rb_entry(next, struct section, rb_node); - next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, &self->secs); - sec_dso__delete_section(pos); - } -} - -void sec_dso__delete_self(struct sec_dso *self) -{ - sec_dso__delete_sections(self); - free(self); -} - -static void sec_dso__insert_section(struct sec_dso *self, struct section *sec) -{ - struct rb_node **p = &self->secs.rb_node; - struct rb_node *parent = NULL; - const u64 hash = sec->hash; - struct section *s; - - while (*p != NULL) { - parent = *p; - s = rb_entry(parent, struct section, rb_node); - if (hash < s->hash) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&sec->rb_node, parent, p); - rb_insert_color(&sec->rb_node, &self->secs); -} - -struct section *sec_dso__find_section(struct sec_dso *self, const char *name) -{ - struct rb_node *n; - u64 hash; - int len; - - if (self == NULL) - return NULL; - - len = strlen(name); - hash = crc32(name, len); - - n = self->secs.rb_node; - - while (n) { - struct section *s = rb_entry(n, struct section, rb_node); - - if (hash < s->hash) - n = n->rb_left; - else if (hash > s->hash) - n = n->rb_right; - else { - if (!strcmp(name, s->name)) - return s; - else - n = rb_next(&s->rb_node); - } - } - - return NULL; -} - -static size_t sec_dso__fprintf_section(struct section *self, FILE *fp) -{ - return fprintf(fp, "name:%s vma:%llx path:%s\n", - self->name, self->vma, self->path); -} - -size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp) -{ - size_t ret = fprintf(fp, "dso: %s\n", self->name); - - struct rb_node *nd; - for (nd = rb_first(&self->secs); nd; nd = rb_next(nd)) { - struct section *pos = rb_entry(nd, struct section, rb_node); - ret += sec_dso__fprintf_section(pos, fp); - } - - return ret; -} - -static struct section *section__new(const char *name, const char *path) -{ - struct section *self = calloc(1, sizeof(*self)); - - if (!self) - goto out_failure; - - self->name = calloc(1, strlen(name) + 1); - if (!self->name) - goto out_failure; - - self->path = calloc(1, strlen(path) + 1); - if (!self->path) - goto out_failure; - - strcpy(self->name, name); - strcpy(self->path, path); - self->hash = crc32(self->name, strlen(name)); - - return self; - -out_failure: - if (self) { - if (self->name) - free(self->name); - if (self->path) - free(self->path); - free(self); - } - - return NULL; -} - -/* module methods */ - -struct mod_dso *mod_dso__new_dso(const char *name) -{ - struct mod_dso *self = malloc(sizeof(*self) + strlen(name) + 1); - - if (self != NULL) { - strcpy(self->name, name); - self->mods = RB_ROOT; - self->find_module = mod_dso__find_module; - } - - return self; -} - -static void mod_dso__delete_module(struct module *self) -{ - free(((void *)self)); -} - -void mod_dso__delete_modules(struct mod_dso *self) -{ - struct module *pos; - struct rb_node *next = rb_first(&self->mods); - - while (next) { - pos = rb_entry(next, struct module, rb_node); - next = rb_next(&pos->rb_node); - 
rb_erase(&pos->rb_node, &self->mods); - mod_dso__delete_module(pos); - } -} - -void mod_dso__delete_self(struct mod_dso *self) -{ - mod_dso__delete_modules(self); - free(self); -} - -static void mod_dso__insert_module(struct mod_dso *self, struct module *mod) -{ - struct rb_node **p = &self->mods.rb_node; - struct rb_node *parent = NULL; - const u64 hash = mod->hash; - struct module *m; - - while (*p != NULL) { - parent = *p; - m = rb_entry(parent, struct module, rb_node); - if (hash < m->hash) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&mod->rb_node, parent, p); - rb_insert_color(&mod->rb_node, &self->mods); -} - -struct module *mod_dso__find_module(struct mod_dso *self, const char *name) -{ - struct rb_node *n; - u64 hash; - int len; - - if (self == NULL) - return NULL; - - len = strlen(name); - hash = crc32(name, len); - - n = self->mods.rb_node; - - while (n) { - struct module *m = rb_entry(n, struct module, rb_node); - - if (hash < m->hash) - n = n->rb_left; - else if (hash > m->hash) - n = n->rb_right; - else { - if (!strcmp(name, m->name)) - return m; - else - n = rb_next(&m->rb_node); - } - } - - return NULL; -} - -static size_t mod_dso__fprintf_module(struct module *self, FILE *fp) -{ - return fprintf(fp, "name:%s path:%s\n", self->name, self->path); -} - -size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp) -{ - struct rb_node *nd; - size_t ret; - - ret = fprintf(fp, "dso: %s\n", self->name); - - for (nd = rb_first(&self->mods); nd; nd = rb_next(nd)) { - struct module *pos = rb_entry(nd, struct module, rb_node); - - ret += mod_dso__fprintf_module(pos, fp); - } - - return ret; -} - -static struct module *module__new(const char *name, const char *path) -{ - struct module *self = calloc(1, sizeof(*self)); - - if (!self) - goto out_failure; - - self->name = calloc(1, strlen(name) + 1); - if (!self->name) - goto out_failure; - - self->path = calloc(1, strlen(path) + 1); - if (!self->path) - goto out_failure; - - strcpy(self->name, name); - strcpy(self->path, path); - self->hash = crc32(self->name, strlen(name)); - - return self; - -out_failure: - if (self) { - if (self->name) - free(self->name); - if (self->path) - free(self->path); - free(self); - } - - return NULL; -} - -static int mod_dso__load_sections(struct module *mod) -{ - int count = 0, path_len; - struct dirent *entry; - char *line = NULL; - char *dir_path; - DIR *dir; - size_t n; - - path_len = strlen("/sys/module/"); - path_len += strlen(mod->name); - path_len += strlen("/sections/"); - - dir_path = calloc(1, path_len + 1); - if (dir_path == NULL) - goto out_failure; - - strcat(dir_path, "/sys/module/"); - strcat(dir_path, mod->name); - strcat(dir_path, "/sections/"); - - dir = opendir(dir_path); - if (dir == NULL) - goto out_free; - - while ((entry = readdir(dir))) { - struct section *section; - char *path, *vma; - int line_len; - FILE *file; - - if (!strcmp(".", entry->d_name) || !strcmp("..", entry->d_name)) - continue; - - path = calloc(1, path_len + strlen(entry->d_name) + 1); - if (path == NULL) - break; - strcat(path, dir_path); - strcat(path, entry->d_name); - - file = fopen(path, "r"); - if (file == NULL) { - free(path); - break; - } - - line_len = getline(&line, &n, file); - if (line_len < 0) { - free(path); - fclose(file); - break; - } - - if (!line) { - free(path); - fclose(file); - break; - } - - line[--line_len] = '\0'; /* \n */ - - vma = strstr(line, "0x"); - if (!vma) { - free(path); - fclose(file); - break; - } - vma += 2; - - section = section__new(entry->d_name, path); - if 
(!section) { - fprintf(stderr, "load_sections: allocation error\n"); - free(path); - fclose(file); - break; - } - - hex2u64(vma, §ion->vma); - sec_dso__insert_section(mod->sections, section); - - free(path); - fclose(file); - count++; - } - - closedir(dir); - free(line); - free(dir_path); - - return count; - -out_free: - free(dir_path); - -out_failure: - return count; -} - -static int mod_dso__load_module_paths(struct mod_dso *self) -{ - struct utsname uts; - int count = 0, len, err = -1; - char *line = NULL; - FILE *file; - char *dpath, *dir; - size_t n; - - if (uname(&uts) < 0) - return err; - - len = strlen("/lib/modules/"); - len += strlen(uts.release); - len += strlen("/modules.dep"); - - dpath = calloc(1, len + 1); - if (dpath == NULL) - return err; - - strcat(dpath, "/lib/modules/"); - strcat(dpath, uts.release); - strcat(dpath, "/modules.dep"); - - file = fopen(dpath, "r"); - if (file == NULL) - goto out_failure; - - dir = dirname(dpath); - if (!dir) - goto out_failure; - strcat(dir, "/"); - - while (!feof(file)) { - struct module *module; - char *name, *path, *tmp; - FILE *modfile; - int line_len; - - line_len = getline(&line, &n, file); - if (line_len < 0) - break; - - if (!line) - break; - - line[--line_len] = '\0'; /* \n */ - - path = strchr(line, ':'); - if (!path) - break; - *path = '\0'; - - path = strdup(line); - if (!path) - break; - - if (!strstr(path, dir)) { - if (strncmp(path, "kernel/", 7)) - break; - - free(path); - path = calloc(1, strlen(dir) + strlen(line) + 1); - if (!path) - break; - strcat(path, dir); - strcat(path, line); - } - - modfile = fopen(path, "r"); - if (modfile == NULL) - break; - fclose(modfile); - - name = strdup(path); - if (!name) - break; - - name = strtok(name, "/"); - tmp = name; - - while (tmp) { - tmp = strtok(NULL, "/"); - if (tmp) - name = tmp; - } - - name = strsep(&name, "."); - if (!name) - break; - - /* Quirk: replace '-' with '_' in all modules */ - for (len = strlen(name); len; len--) { - if (*(name+len) == '-') - *(name+len) = '_'; - } - - module = module__new(name, path); - if (!module) - break; - mod_dso__insert_module(self, module); - - module->sections = sec_dso__new_dso("sections"); - if (!module->sections) - break; - - module->active = mod_dso__load_sections(module); - - if (module->active > 0) - count++; - } - - if (feof(file)) - err = count; - else - fprintf(stderr, "load_module_paths: modules.dep parsing failure!\n"); - -out_failure: - if (dpath) - free(dpath); - if (file) - fclose(file); - if (line) - free(line); - - return err; -} - -int mod_dso__load_modules(struct mod_dso *dso) -{ - int err; - - err = mod_dso__load_module_paths(dso); - - return err; -} diff --git a/tools/perf/util/module.h b/tools/perf/util/module.h deleted file mode 100644 index 098e0412bc22..000000000000 --- a/tools/perf/util/module.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef __PERF_MODULE_ -#define __PERF_MODULE_ 1 - -#include -#include "../types.h" -#include -#include - -struct section { - struct rb_node rb_node; - u64 hash; - u64 vma; - char *name; - char *path; -}; - -struct sec_dso { - struct list_head node; - struct rb_root secs; - struct section *(*find_section)(struct sec_dso *, const char *name); - char name[0]; -}; - -struct module { - struct rb_node rb_node; - u64 hash; - char *name; - char *path; - struct sec_dso *sections; - int active; -}; - -struct mod_dso { - struct list_head node; - struct rb_root mods; - struct module *(*find_module)(struct mod_dso *, const char *name); - char name[0]; -}; - -struct sec_dso *sec_dso__new_dso(const char 
*name); -void sec_dso__delete_sections(struct sec_dso *self); -void sec_dso__delete_self(struct sec_dso *self); -size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp); -struct section *sec_dso__find_section(struct sec_dso *self, const char *name); - -struct mod_dso *mod_dso__new_dso(const char *name); -void mod_dso__delete_modules(struct mod_dso *self); -void mod_dso__delete_self(struct mod_dso *self); -size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp); -struct module *mod_dso__find_module(struct mod_dso *self, const char *name); -int mod_dso__load_modules(struct mod_dso *dso); - -#endif /* __PERF_MODULE_ */ diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 50e75abb1fdd..40c9acd41cad 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -129,20 +129,32 @@ sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width) int64_t sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) { - struct dso *dso_l = left->dso; - struct dso *dso_r = right->dso; + struct dso *dso_l = left->map ? left->map->dso : NULL; + struct dso *dso_r = right->map ? right->map->dso : NULL; + const char *dso_name_l, *dso_name_r; if (!dso_l || !dso_r) return cmp_null(dso_l, dso_r); - return strcmp(dso_l->name, dso_r->name); + if (verbose) { + dso_name_l = dso_l->long_name; + dso_name_r = dso_r->long_name; + } else { + dso_name_l = dso_l->short_name; + dso_name_r = dso_r->short_name; + } + + return strcmp(dso_name_l, dso_name_r); } size_t sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width) { - if (self->dso) - return repsep_fprintf(fp, "%-*s", width, self->dso->name); + if (self->map && self->map->dso) { + const char *dso_name = !verbose ? self->map->dso->short_name : + self->map->dso->long_name; + return repsep_fprintf(fp, "%-*s", width, dso_name); + } return repsep_fprintf(fp, "%*llx", width, (u64)self->ip); } @@ -169,20 +181,16 @@ sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used) { size_t ret = 0; - if (verbose) - ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, - dso__symtab_origin(self->dso)); + if (verbose) { + char o = self->map ? 
dso__symtab_origin(self->map->dso) : '!'; + ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, o); + } ret += repsep_fprintf(fp, "[%c] ", self->level); - if (self->sym) { + if (self->sym) ret += repsep_fprintf(fp, "%s", self->sym->name); - - if (self->sym->module) - ret += repsep_fprintf(fp, "\t[%s]", - self->sym->module->name); - } else { + else ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip); - } return ret; } diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 4684fd6d5c4a..13806d782af6 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -42,18 +42,15 @@ extern unsigned int threads__col_width; struct hist_entry { struct rb_node rb_node; - + u64 count; struct thread *thread; struct map *map; - struct dso *dso; struct symbol *sym; - struct symbol *parent; u64 ip; char level; + struct symbol *parent; struct callchain_node callchain; struct rb_root sorted_chain; - - u64 count; }; /* diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 559fb06210f5..e88296899470 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -2,12 +2,14 @@ #include "../perf.h" #include "string.h" #include "symbol.h" +#include "thread.h" #include "debug.h" #include #include #include +#include const char *sym_hist_filter; @@ -18,12 +20,15 @@ enum dso_origin { DSO__ORIG_UBUNTU, DSO__ORIG_BUILDID, DSO__ORIG_DSO, + DSO__ORIG_KMODULE, DSO__ORIG_NOT_FOUND, }; -static struct symbol *symbol__new(u64 start, u64 len, - const char *name, unsigned int priv_size, - u64 obj_start, int v) +static void dsos__add(struct dso *dso); +static struct dso *dsos__find(const char *name); + +static struct symbol *symbol__new(u64 start, u64 len, const char *name, + unsigned int priv_size, int v) { size_t namelen = strlen(name) + 1; struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen); @@ -32,10 +37,9 @@ static struct symbol *symbol__new(u64 start, u64 len, return NULL; if (v >= 2) - printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n", - (u64)start, (unsigned long)len, name, self->hist, (void *)(unsigned long)obj_start); + printf("new symbol: %016Lx [%08lx]: %s, hist: %p\n", + start, (unsigned long)len, name, self->hist); - self->obj_start= obj_start; self->hist = NULL; self->hist_sum = 0; @@ -60,12 +64,8 @@ static void symbol__delete(struct symbol *self, unsigned int priv_size) static size_t symbol__fprintf(struct symbol *self, FILE *fp) { - if (!self->module) - return fprintf(fp, " %llx-%llx %s\n", + return fprintf(fp, " %llx-%llx %s\n", self->start, self->end, self->name); - else - return fprintf(fp, " %llx-%llx %s \t[%s]\n", - self->start, self->end, self->name, self->module->name); } struct dso *dso__new(const char *name, unsigned int sym_priv_size) @@ -74,6 +74,8 @@ struct dso *dso__new(const char *name, unsigned int sym_priv_size) if (self != NULL) { strcpy(self->name, name); + self->long_name = self->name; + self->short_name = self->name; self->syms = RB_ROOT; self->sym_priv_size = sym_priv_size; self->find_symbol = dso__find_symbol; @@ -100,6 +102,8 @@ static void dso__delete_symbols(struct dso *self) void dso__delete(struct dso *self) { dso__delete_symbols(self); + if (self->long_name != self->name) + free(self->long_name); free(self); } @@ -147,7 +151,7 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip) size_t dso__fprintf(struct dso *self, FILE *fp) { - size_t ret = fprintf(fp, "dso: %s\n", self->name); + size_t ret = fprintf(fp, "dso: %s\n", self->long_name); struct rb_node *nd; for (nd = rb_first(&self->syms); nd; 
nd = rb_next(nd)) { @@ -158,7 +162,8 @@ size_t dso__fprintf(struct dso *self, FILE *fp) return ret; } -static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v) +static int dso__load_kallsyms(struct dso *self, struct map *map, + symbol_filter_t filter, int v) { struct rb_node *nd, *prevnd; char *line = NULL; @@ -200,12 +205,12 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v) * Well fix up the end later, when we have all sorted. */ sym = symbol__new(start, 0xdead, line + len + 2, - self->sym_priv_size, 0, v); + self->sym_priv_size, v); if (sym == NULL) goto out_delete_line; - if (filter && filter(self, sym)) + if (filter && filter(map, sym)) symbol__delete(sym, self->sym_priv_size); else { dso__insert_symbol(self, sym); @@ -241,14 +246,15 @@ out_failure: return -1; } -static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int v) +static int dso__load_perf_map(struct dso *self, struct map *map, + symbol_filter_t filter, int v) { char *line = NULL; size_t n; FILE *file; int nr_syms = 0; - file = fopen(self->name, "r"); + file = fopen(self->long_name, "r"); if (file == NULL) goto out_failure; @@ -279,12 +285,12 @@ static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int v) continue; sym = symbol__new(start, size, line + len, - self->sym_priv_size, start, v); + self->sym_priv_size, v); if (sym == NULL) goto out_delete_line; - if (filter && filter(self, sym)) + if (filter && filter(map, sym)) symbol__delete(sym, self->sym_priv_size); else { dso__insert_symbol(self, sym); @@ -410,7 +416,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) Elf *elf; int nr = 0, symidx, fd, err = 0; - fd = open(self->name, O_RDONLY); + fd = open(self->long_name, O_RDONLY); if (fd < 0) goto out; @@ -478,7 +484,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size, 0, v); + sympltname, self->sym_priv_size, v); if (!f) goto out_elf_end; @@ -496,7 +502,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size, 0, v); + sympltname, self->sym_priv_size, v); if (!f) goto out_elf_end; @@ -515,12 +521,13 @@ out_close: return nr; out: fprintf(stderr, "%s: problems reading %s PLT info.\n", - __func__, self->name); + __func__, self->long_name); return 0; } -static int dso__load_sym(struct dso *self, int fd, const char *name, - symbol_filter_t filter, int v, struct module *mod) +static int dso__load_sym(struct dso *self, struct map *map, const char *name, + int fd, symbol_filter_t filter, int kernel, + int kmodule, int v) { Elf_Data *symstrs, *secstrs; uint32_t nr_syms; @@ -532,7 +539,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, GElf_Sym sym; Elf_Scn *sec, *sec_strndx; Elf *elf; - int nr = 0, kernel = !strcmp("[kernel]", self->name); + int nr = 0; elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); if (elf == NULL) { @@ -589,8 +596,6 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, struct symbol *f; const char *elf_name; char *demangled; - u64 obj_start; - struct section *section = NULL; int is_label = elf_sym__is_label(&sym); const char *section_name; @@ -607,7 +612,6 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, continue; section_name = elf_sec__name(&shdr, secstrs); - obj_start = 
sym.st_value; if (self->adjust_symbols) { if (v >= 2) @@ -615,18 +619,8 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); sym.st_value -= shdr.sh_addr - shdr.sh_offset; - } - - if (mod) { - section = mod->sections->find_section(mod->sections, section_name); - if (section) - sym.st_value += section->vma; - else { - fprintf(stderr, "dso__load_sym() module %s lookup of %s failed\n", - mod->name, section_name); - goto out_elf_end; - } - } + } else if (kmodule) + sym.st_value += shdr.sh_offset; /* * We need to figure out if the object was created from C++ sources * DWARF DW_compile_unit has this, but we don't always have access @@ -638,15 +632,14 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, elf_name = demangled; f = symbol__new(sym.st_value, sym.st_size, elf_name, - self->sym_priv_size, obj_start, v); + self->sym_priv_size, v); free(demangled); if (!f) goto out_elf_end; - if (filter && filter(self, f)) + if (filter && filter(map, f)) symbol__delete(f, self->sym_priv_size); else { - f->module = mod; dso__insert_symbol(self, f); nr++; } @@ -671,7 +664,7 @@ static char *dso__read_build_id(struct dso *self, int v) char *build_id = NULL, *bid; unsigned char *raw; Elf *elf; - int fd = open(self->name, O_RDONLY); + int fd = open(self->long_name, O_RDONLY); if (fd < 0) goto out; @@ -680,7 +673,7 @@ static char *dso__read_build_id(struct dso *self, int v) if (elf == NULL) { if (v) fprintf(stderr, "%s: cannot read %s ELF file.\n", - __func__, self->name); + __func__, self->long_name); goto out_close; } @@ -709,7 +702,7 @@ static char *dso__read_build_id(struct dso *self, int v) bid += 2; } if (v >= 2) - printf("%s(%s): %s\n", __func__, self->name, build_id); + printf("%s(%s): %s\n", __func__, self->long_name, build_id); out_elf_end: elf_end(elf); out_close: @@ -727,6 +720,7 @@ char dso__symtab_origin(const struct dso *self) [DSO__ORIG_UBUNTU] = 'u', [DSO__ORIG_BUILDID] = 'b', [DSO__ORIG_DSO] = 'd', + [DSO__ORIG_KMODULE] = 'K', }; if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND) @@ -734,7 +728,7 @@ char dso__symtab_origin(const struct dso *self) return origin[self->origin]; } -int dso__load(struct dso *self, symbol_filter_t filter, int v) +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter, int v) { int size = PATH_MAX; char *name = malloc(size), *build_id = NULL; @@ -747,7 +741,7 @@ int dso__load(struct dso *self, symbol_filter_t filter, int v) self->adjust_symbols = 0; if (strncmp(self->name, "/tmp/perf-", 10) == 0) { - ret = dso__load_perf_map(self, filter, v); + ret = dso__load_perf_map(self, map, filter, v); self->origin = ret > 0 ? 
DSO__ORIG_JAVA_JIT : DSO__ORIG_NOT_FOUND; return ret; @@ -760,10 +754,12 @@ more: self->origin++; switch (self->origin) { case DSO__ORIG_FEDORA: - snprintf(name, size, "/usr/lib/debug%s.debug", self->name); + snprintf(name, size, "/usr/lib/debug%s.debug", + self->long_name); break; case DSO__ORIG_UBUNTU: - snprintf(name, size, "/usr/lib/debug%s", self->name); + snprintf(name, size, "/usr/lib/debug%s", + self->long_name); break; case DSO__ORIG_BUILDID: build_id = dso__read_build_id(self, v); @@ -777,7 +773,7 @@ more: self->origin++; /* Fall thru */ case DSO__ORIG_DSO: - snprintf(name, size, "%s", self->name); + snprintf(name, size, "%s", self->long_name); break; default: @@ -787,7 +783,7 @@ more: fd = open(name, O_RDONLY); } while (fd < 0); - ret = dso__load_sym(self, fd, name, filter, v, NULL); + ret = dso__load_sym(self, map, name, fd, filter, 0, 0, v); close(fd); /* @@ -808,89 +804,247 @@ out: return ret; } -static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *name, - symbol_filter_t filter, int v) +static struct rb_root kernel_maps; +struct map *kernel_map; + +static void kernel_maps__insert(struct map *map) { - struct module *mod = mod_dso__find_module(mods, name); - int err = 0, fd; + maps__insert(&kernel_maps, map); +} - if (mod == NULL || !mod->active) - return err; +struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp) +{ + /* + * We can't have kernel_map in kernel_maps because it spans an address + * space that includes the modules. The right way to fix this is to + * create several maps, so that we don't have overlapping ranges with + * modules. For now lets look first on the kernel dso. + */ + struct map *map = maps__find(&kernel_maps, ip); + struct symbol *sym; + + if (map) { + ip = map->map_ip(map, ip); + sym = map->dso->find_symbol(map->dso, ip); + } else { + map = kernel_map; + sym = map->dso->find_symbol(map->dso, ip); + } - fd = open(mod->path, O_RDONLY); + if (mapp) + *mapp = map; - if (fd < 0) + return sym; +} + +struct map *kernel_maps__find_by_dso_name(const char *name) +{ + struct rb_node *nd; + + for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) { + struct map *map = rb_entry(nd, struct map, rb_node); + + if (map->dso && strcmp(map->dso->name, name) == 0) + return map; + } + + return NULL; +} + +static int dso__load_module_sym(struct dso *self, struct map *map, + symbol_filter_t filter, int v) +{ + int err = 0, fd = open(self->long_name, O_RDONLY); + + if (fd < 0) { + if (v) + fprintf(stderr, "%s: cannot open %s\n", + __func__, self->long_name); return err; + } - err = dso__load_sym(self, fd, name, filter, v, mod); + err = dso__load_sym(self, map, self->long_name, fd, filter, 0, 1, v); close(fd); return err; } -int dso__load_modules(struct dso *self, symbol_filter_t filter, int v) +static int dsos__load_modules_sym_dir(char *dirname, + symbol_filter_t filter, int v) { - struct mod_dso *mods = mod_dso__new_dso("modules"); - struct module *pos; - struct rb_node *next; - int err, count = 0; + struct dirent *dent; + int nr_symbols = 0, err; + DIR *dir = opendir(dirname); - err = mod_dso__load_modules(mods); + if (!dir) { + if (v) + fprintf(stderr, "%s: cannot open %s dir\n", __func__, + dirname); + return -1; + } - if (err <= 0) - return err; + while ((dent = readdir(dir)) != NULL) { + char path[PATH_MAX]; + + if (dent->d_type == DT_DIR) { + if (!strcmp(dent->d_name, ".") || + !strcmp(dent->d_name, "..")) + continue; + + snprintf(path, sizeof(path), "%s/%s", + dirname, dent->d_name); + err = dsos__load_modules_sym_dir(path, 
filter, v); + if (err < 0) + goto failure; + } else { + char *dot = strrchr(dent->d_name, '.'), + dso_name[PATH_MAX]; + struct map *map; + struct rb_node *last; + + if (dot == NULL || strcmp(dot, ".ko")) + continue; + snprintf(dso_name, sizeof(dso_name), "[%.*s]", + (int)(dot - dent->d_name), dent->d_name); + + map = kernel_maps__find_by_dso_name(dso_name); + if (map == NULL) + continue; + + snprintf(path, sizeof(path), "%s/%s", + dirname, dent->d_name); + + map->dso->long_name = strdup(path); + if (map->dso->long_name == NULL) + goto failure; + + err = dso__load_module_sym(map->dso, map, filter, v); + if (err < 0) + goto failure; + last = rb_last(&map->dso->syms); + if (last) { + struct symbol *sym; + sym = rb_entry(last, struct symbol, rb_node); + map->end = map->start + sym->end; + } + } + nr_symbols += err; + } - /* - * Iterate over modules, and load active symbols. - */ - next = rb_first(&mods->mods); - while (next) { - pos = rb_entry(next, struct module, rb_node); - err = dso__load_module(self, mods, pos->name, filter, v); + return nr_symbols; +failure: + closedir(dir); + return -1; +} - if (err < 0) - break; +static int dsos__load_modules_sym(symbol_filter_t filter, int v) +{ + struct utsname uts; + char modules_path[PATH_MAX]; - next = rb_next(&pos->rb_node); - count += err; - } + if (uname(&uts) < 0) + return -1; - if (err < 0) { - mod_dso__delete_modules(mods); - mod_dso__delete_self(mods); - return err; - } + snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel", + uts.release); - return count; + return dsos__load_modules_sym_dir(modules_path, filter, v); } -static inline void dso__fill_symbol_holes(struct dso *self) +/* + * Constructor variant for modules (where we know from /proc/modules where + * they are loaded) and for vmlinux, where only after we load all the + * symbols we'll know where it starts and ends. 
+ */ +static struct map *map__new2(u64 start, struct dso *dso) { - struct symbol *prev = NULL; - struct rb_node *nd; + struct map *self = malloc(sizeof(*self)); - for (nd = rb_last(&self->syms); nd; nd = rb_prev(nd)) { - struct symbol *pos = rb_entry(nd, struct symbol, rb_node); + if (self != NULL) { + self->start = start; + /* + * Will be filled after we load all the symbols + */ + self->end = 0; + + self->pgoff = 0; + self->dso = dso; + self->map_ip = map__map_ip; + RB_CLEAR_NODE(&self->rb_node); + } + return self; +} + +int dsos__load_modules(unsigned int sym_priv_size, + symbol_filter_t filter, int v) +{ + char *line = NULL; + size_t n; + FILE *file = fopen("/proc/modules", "r"); + struct map *map; - if (prev) { - u64 hole = 0; - int alias = pos->start == prev->start; + if (file == NULL) + return -1; - if (!alias) - hole = prev->start - pos->end - 1; + while (!feof(file)) { + char name[PATH_MAX]; + u64 start; + struct dso *dso; + char *sep; + int line_len; - if (hole || alias) { - if (alias) - pos->end = prev->end; - else if (hole) - pos->end = prev->start - 1; - } + line_len = getline(&line, &n, file); + if (line_len < 0) + break; + + if (!line) + goto out_failure; + + line[--line_len] = '\0'; /* \n */ + + sep = strrchr(line, 'x'); + if (sep == NULL) + continue; + + hex2u64(sep + 1, &start); + + sep = strchr(line, ' '); + if (sep == NULL) + continue; + + *sep = '\0'; + + snprintf(name, sizeof(name), "[%s]", line); + dso = dso__new(name, sym_priv_size); + + if (dso == NULL) + goto out_delete_line; + + map = map__new2(start, dso); + if (map == NULL) { + dso__delete(dso); + goto out_delete_line; } - prev = pos; + + dso->origin = DSO__ORIG_KMODULE; + kernel_maps__insert(map); + dsos__add(dso); } + + free(line); + fclose(file); + + v = 1; + return dsos__load_modules_sym(filter, v); + +out_delete_line: + free(line); +out_failure: + return -1; } -static int dso__load_vmlinux(struct dso *self, const char *vmlinux, +static int dso__load_vmlinux(struct dso *self, struct map *map, + const char *vmlinux, symbol_filter_t filter, int v) { int err, fd = open(vmlinux, O_RDONLY); @@ -898,28 +1052,36 @@ static int dso__load_vmlinux(struct dso *self, const char *vmlinux, if (fd < 0) return -1; - err = dso__load_sym(self, fd, vmlinux, filter, v, NULL); - - if (err > 0) - dso__fill_symbol_holes(self); + err = dso__load_sym(self, map, self->long_name, fd, filter, 1, 0, v); close(fd); return err; } -int dso__load_kernel(struct dso *self, const char *vmlinux, - symbol_filter_t filter, int v, int use_modules) +int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, + symbol_filter_t filter, int v, int use_modules) { int err = -1; + struct dso *dso = dso__new(vmlinux, sym_priv_size); + + if (dso == NULL) + return -1; + + dso->short_name = "[kernel]"; + kernel_map = map__new2(0, dso); + if (kernel_map == NULL) + goto out_delete_dso; + + kernel_map->map_ip = vdso__map_ip; if (vmlinux) { - err = dso__load_vmlinux(self, vmlinux, filter, v); + err = dso__load_vmlinux(dso, kernel_map, vmlinux, filter, v); if (err > 0 && use_modules) { - int syms = dso__load_modules(self, filter, v); + int syms = dsos__load_modules(sym_priv_size, filter, v); if (syms < 0) { - fprintf(stderr, "dso__load_modules failed!\n"); + fprintf(stderr, "dsos__load_modules failed!\n"); return syms; } err += syms; @@ -927,18 +1089,34 @@ int dso__load_kernel(struct dso *self, const char *vmlinux, } if (err <= 0) - err = dso__load_kallsyms(self, filter, v); + err = dso__load_kallsyms(dso, kernel_map, filter, v); + + if (err > 0) { + 
struct rb_node *node = rb_first(&dso->syms); + struct symbol *sym = rb_entry(node, struct symbol, rb_node); - if (err > 0) - self->origin = DSO__ORIG_KERNEL; + kernel_map->start = sym->start; + node = rb_last(&dso->syms); + sym = rb_entry(node, struct symbol, rb_node); + kernel_map->end = sym->end; + + dso->origin = DSO__ORIG_KERNEL; + /* + * XXX See kernel_maps__find_symbol comment + * kernel_maps__insert(kernel_map) + */ + dsos__add(dso); + } return err; + +out_delete_dso: + dso__delete(dso); + return -1; } LIST_HEAD(dsos); -struct dso *kernel_dso; struct dso *vdso; -struct dso *hypervisor_dso; const char *vmlinux_name = "vmlinux"; int modules; @@ -970,7 +1148,7 @@ struct dso *dsos__findnew(const char *name) if (!dso) goto out_delete_dso; - nr = dso__load(dso, NULL, verbose); + nr = dso__load(dso, NULL, NULL, verbose); if (nr < 0) { eprintf("Failed to open: %s\n", name); goto out_delete_dso; @@ -995,43 +1173,20 @@ void dsos__fprintf(FILE *fp) dso__fprintf(pos, fp); } -static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip) -{ - return dso__find_symbol(dso, ip); -} - int load_kernel(void) { - int err; - - kernel_dso = dso__new("[kernel]", 0); - if (!kernel_dso) + if (dsos__load_kernel(vmlinux_name, 0, NULL, verbose, modules) <= 0) return -1; - err = dso__load_kernel(kernel_dso, vmlinux_name, NULL, verbose, modules); - if (err <= 0) { - dso__delete(kernel_dso); - kernel_dso = NULL; - } else - dsos__add(kernel_dso); - vdso = dso__new("[vdso]", 0); if (!vdso) return -1; - vdso->find_symbol = vdso__find_symbol; - dsos__add(vdso); - hypervisor_dso = dso__new("[hypervisor]", 0); - if (!hypervisor_dso) - return -1; - dsos__add(hypervisor_dso); - - return err; + return 0; } - void symbol__init(void) { elf_version(EV_CURRENT); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index ee164f659ed3..5339fd82ec96 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -5,7 +5,6 @@ #include "types.h" #include #include -#include "module.h" #include "event.h" #ifdef HAVE_CPLUS_DEMANGLE @@ -36,10 +35,8 @@ struct symbol { struct rb_node rb_node; u64 start; u64 end; - u64 obj_start; u64 hist_sum; u64 *hist; - struct module *module; void *priv; char name[0]; }; @@ -52,12 +49,14 @@ struct dso { unsigned char adjust_symbols; unsigned char slen_calculated; unsigned char origin; + const char *short_name; + char *long_name; char name[0]; }; extern const char *sym_hist_filter; -typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym); +typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); struct dso *dso__new(const char *name, unsigned int sym_priv_size); void dso__delete(struct dso *self); @@ -69,10 +68,12 @@ static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) struct symbol *dso__find_symbol(struct dso *self, u64 ip); -int dso__load_kernel(struct dso *self, const char *vmlinux, - symbol_filter_t filter, int verbose, int modules); -int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose); -int dso__load(struct dso *self, symbol_filter_t filter, int verbose); +int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, + symbol_filter_t filter, int verbose, int modules); +int dsos__load_modules(unsigned int sym_priv_size, symbol_filter_t filter, + int verbose); +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter, + int verbose); struct dso *dsos__findnew(const char *name); void dsos__fprintf(FILE *fp); @@ -84,9 +85,8 @@ int load_kernel(void); void symbol__init(void); extern 
struct list_head dsos; -extern struct dso *kernel_dso; +extern struct map *kernel_map; extern struct dso *vdso; -extern struct dso *hypervisor_dso; extern const char *vmlinux_name; extern int modules; #endif /* __PERF_SYMBOL */ diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 9d0945cc66d1..3b56aebb1f4b 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -16,6 +16,7 @@ static struct thread *thread__new(pid_t pid) if (self->comm) snprintf(self->comm, 32, ":%d", self->pid); self->maps = RB_ROOT; + INIT_LIST_HEAD(&self->removed_maps); } return self; @@ -32,13 +33,20 @@ int thread__set_comm(struct thread *self, const char *comm) static size_t thread__fprintf(struct thread *self, FILE *fp) { struct rb_node *nd; - size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm); + struct map *pos; + size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n", + self->pid, self->comm); for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) { - struct map *pos = rb_entry(nd, struct map, rb_node); + pos = rb_entry(nd, struct map, rb_node); ret += map__fprintf(pos, fp); } + ret = fprintf(fp, "Removed maps:\n"); + + list_for_each_entry(pos, &self->removed_maps, node) + ret += map__fprintf(pos, fp); + return ret; } @@ -112,21 +120,13 @@ static void thread__remove_overlappings(struct thread *self, struct map *map) map__fprintf(pos, stdout); } - if (map->start <= pos->start && map->end > pos->start) - pos->start = map->end; - - if (map->end >= pos->end && map->start < pos->end) - pos->end = map->start; - - if (verbose >= 2) { - printf("after collision:\n"); - map__fprintf(pos, stdout); - } - - if (pos->start >= pos->end) { - rb_erase(&pos->rb_node, &self->maps); - free(pos); - } + rb_erase(&pos->rb_node, &self->maps); + /* + * We may have references to this map, for instance in some + * hist_entry instances, so just move them to a separate + * list. + */ + list_add_tail(&pos->node, &self->removed_maps); } } diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index bbb37c1a52ee..845d9b62f96f 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -8,6 +8,7 @@ struct thread { struct rb_node rb_node; struct rb_root maps; + struct list_head removed_maps; pid_t pid; char shortname[3]; char *comm; @@ -25,6 +26,9 @@ size_t threads__fprintf(FILE *fp, struct rb_root *threads); void maps__insert(struct rb_root *maps, struct map *map); struct map *maps__find(struct rb_root *maps, u64 ip); +struct symbol *kernel_maps__find_symbol(const u64 ip, struct map **mapp); +struct map *kernel_maps__find_by_dso_name(const char *name); + static inline struct map *thread__find_map(struct thread *self, u64 ip) { return self ? maps__find(&self->maps, ip) : NULL; -- cgit v1.2.3 From 9735abf11bec48bfbbb1b54772a02deb2ae0c403 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 3 Oct 2009 10:42:45 -0300 Subject: perf tools: Move hist_entry__add common code to hist.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now perf report and annotate do the callgraph/hit processing in their specialized hist_entry__add functions. 
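The common rbtree walk and insertion now live in __hist_entry__add(), which returns the entry and reports through *hit whether it already existed, leaving only the tool-specific accounting to the callers. A minimal caller sketch (illustrative only, not the exact builtin-report/annotate code; assumes the perf tree's util/hist.h for the types and the new declaration):

    #include <errno.h>
    #include <stdbool.h>
    #include "util/hist.h"

    static int example_hist_entry__add(struct thread *thread, struct map *map,
                                       struct symbol *sym, u64 ip, u64 count,
                                       char level)
    {
            bool hit;
            struct hist_entry *he = __hist_entry__add(thread, map, sym, NULL,
                                                       ip, count, level, &hit);
            if (he == NULL)
                    return -ENOMEM;
            if (hit)        /* entry already existed: just accumulate the period */
                    he->count += count;
            return 0;
    }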
Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 50 +++++++--------------------------- tools/perf/builtin-report.c | 63 +++++++++++-------------------------------- tools/perf/util/hist.c | 46 +++++++++++++++++++++++++++++++ tools/perf/util/hist.h | 3 +++ 4 files changed, 74 insertions(+), 88 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 7d5a3b1bcda9..855094234f2d 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -80,48 +80,16 @@ static void hist_hit(struct hist_entry *he, u64 ip) sym->hist[offset]); } -static int -hist_entry__add(struct thread *thread, struct map *map, - struct symbol *sym, u64 ip, char level) +static int hist_entry__add(struct thread *thread, struct map *map, + struct symbol *sym, u64 ip, u64 count, char level) { - struct rb_node **p = &hist.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *he; - struct hist_entry entry = { - .thread = thread, - .map = map, - .sym = sym, - .ip = ip, - .level = level, - .count = 1, - }; - int cmp; - - while (*p != NULL) { - parent = *p; - he = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__cmp(&entry, he); - - if (!cmp) { - hist_hit(he, ip); - - return 0; - } - - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - he = malloc(sizeof(*he)); - if (!he) + bool hit; + struct hist_entry *he = __hist_entry__add(thread, map, sym, NULL, ip, + count, level, &hit); + if (he == NULL) return -ENOMEM; - *he = entry; - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &hist); - + if (hit) + hist_hit(he, ip); return 0; } @@ -191,7 +159,7 @@ got_map: } if (show & show_mask) { - if (hist_entry__add(thread, map, sym, ip, level)) { + if (hist_entry__add(thread, map, sym, ip, 1, level)) { fprintf(stderr, "problem incrementing symbol count, skipping event\n"); return -1; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 3ed3baf96ffb..0e83ffcbe55a 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -407,9 +407,9 @@ static int call__match(struct symbol *sym) return 0; } -static struct symbol ** -resolve_callchain(struct thread *thread, struct map *map, - struct ip_callchain *chain, struct hist_entry *entry) +static struct symbol **resolve_callchain(struct thread *thread, struct map *map, + struct ip_callchain *chain, + struct symbol **parent) { u64 context = PERF_CONTEXT_MAX; struct symbol **syms = NULL; @@ -444,9 +444,8 @@ resolve_callchain(struct thread *thread, struct map *map, } if (sym) { - if (sort__has_parent && call__match(sym) && - !entry->parent) - entry->parent = sym; + if (sort__has_parent && !*parent && call__match(sym)) + *parent = sym; if (!callchain) break; syms[i] = sym; @@ -465,57 +464,27 @@ hist_entry__add(struct thread *thread, struct map *map, struct symbol *sym, u64 ip, struct ip_callchain *chain, char level, u64 count) { - struct rb_node **p = &hist.rb_node; - struct rb_node *parent = NULL; + struct symbol **syms = NULL, *parent = NULL; + bool hit; struct hist_entry *he; - struct symbol **syms = NULL; - struct hist_entry entry = { - .thread = thread, - .map = map, - .sym = sym, - .ip = ip, - .level = level, - .count = count, - .parent = NULL, - .sorted_chain = RB_ROOT - }; - int cmp; if ((sort__has_parent || callchain) && chain) - syms = resolve_callchain(thread, map, chain, &entry); - - while (*p != NULL) { - 
parent = *p; - he = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__cmp(&entry, he); + syms = resolve_callchain(thread, map, chain, &parent); - if (!cmp) { - he->count += count; - if (callchain) { - append_chain(&he->callchain, chain, syms); - free(syms); - } - return 0; - } + he = __hist_entry__add(thread, map, sym, parent, + ip, count, level, &hit); + if (he == NULL) + return -ENOMEM; - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } + if (hit) + he->count += count; - he = malloc(sizeof(*he)); - if (!he) - return -ENOMEM; - *he = entry; if (callchain) { - callchain_init(&he->callchain); + if (!hit) + callchain_init(&he->callchain); append_chain(&he->callchain, chain, syms); free(syms); } - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &hist); return 0; } diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 82808dc4f8e3..7393a02fd8d4 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -21,6 +21,52 @@ unsigned long total_lost; * histogram, sorted on item, collects counts */ +struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map, + struct symbol *sym, + struct symbol *sym_parent, + u64 ip, u64 count, char level, bool *hit) +{ + struct rb_node **p = &hist.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *he; + struct hist_entry entry = { + .thread = thread, + .map = map, + .sym = sym, + .ip = ip, + .level = level, + .count = count, + .parent = sym_parent, + }; + int cmp; + + while (*p != NULL) { + parent = *p; + he = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__cmp(&entry, he); + + if (!cmp) { + *hit = true; + return he; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + he = malloc(sizeof(*he)); + if (!he) + return NULL; + *he = entry; + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &hist); + *hit = false; + return he; +} + int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) { diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 9a8daa12b43a..ac2149c559b0 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -36,6 +36,9 @@ extern unsigned long total_fork; extern unsigned long total_unknown; extern unsigned long total_lost; +struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map, + struct symbol *sym, struct symbol *parent, + u64 ip, u64 count, char level, bool *hit); extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); extern void hist_entry__free(struct hist_entry *); -- cgit v1.2.3 From ec218fc4a796a1b584741d59ef22615d96981188 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 3 Oct 2009 20:30:48 -0300 Subject: perf tools: Remove show_mask bitmask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As it was not being exposed via any command line and with --dsos/--comms we can do this and even more, like asking for just kernel + some module: [root@doppio linux-2.6-tip]# perf report --dsos \[kernel\],\[drm\] --vmlinux /home/acme/git/build/tip-recvmmsg/vmlinux --modules | head -15 # Samples: 619669 # # Overhead Command Shared Object Symbol # ........ ............... ............. ...... 
# 7.12% swapper [kernel] [k] read_hpet 6.86% init [kernel] [k] read_hpet 6.22% init [kernel] [k] mwait_idle_with_hints 5.34% swapper [kernel] [k] mwait_idle_with_hints 3.01% firefox [kernel] [.] vread_hpet 2.14% Xorg [drm] [k] drm_clflush_pages 2.09% pidgin [kernel] [.] vread_hpet 1.58% npviewer.bin [kernel] [.] vread_hpet 1.37% swapper [kernel] [k] hpet_next_event 1.23% Xorg [kernel] [k] read_hpet [root@doppio linux-2.6-tip]# Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: <20091003233048.GA30535@ghostprotocols.net> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 + tools/perf/builtin-annotate.c | 15 ++++----------- tools/perf/builtin-report.c | 35 +++++++++++++---------------------- tools/perf/util/event.h | 6 ------ 4 files changed, 18 insertions(+), 39 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 8e7509f2d882..2c309a5c6868 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -323,6 +323,7 @@ LIB_H += ../../include/linux/rbtree.h LIB_H += ../../include/linux/list.h LIB_H += util/include/linux/list.h LIB_H += perf.h +LIB_H += util/event.h LIB_H += util/types.h LIB_H += util/levenshtein.h LIB_H += util/parse-options.h diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 855094234f2d..35ed97bd0c63 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -29,7 +29,6 @@ static char const *input_name = "perf.data"; static int force; static int input; -static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; static int full_paths; @@ -97,7 +96,6 @@ static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; - int show = 0; struct thread *thread; u64 ip = event->ip.ip; struct map *map = NULL; @@ -121,13 +119,11 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) } if (event->header.misc & PERF_RECORD_MISC_KERNEL) { - show = SHOW_KERNEL; level = 'k'; sym = kernel_maps__find_symbol(ip, &map); dump_printf(" ...... dso: %s\n", map ? map->dso->long_name : ""); } else if (event->header.misc & PERF_RECORD_MISC_USER) { - show = SHOW_USER; level = '.'; map = thread__find_map(thread, ip); if (map != NULL) { @@ -153,17 +149,14 @@ got_map: dump_printf(" ...... dso: %s\n", map ? map->dso->long_name : ""); } else { - show = SHOW_HV; level = 'H'; dump_printf(" ...... 
dso: [hypervisor]\n"); } - if (show & show_mask) { - if (hist_entry__add(thread, map, sym, ip, 1, level)) { - fprintf(stderr, - "problem incrementing symbol count, skipping event\n"); - return -1; - } + if (hist_entry__add(thread, map, sym, ip, 1, level)) { + fprintf(stderr, "problem incrementing symbol count, " + "skipping event\n"); + return -1; } total++; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 0e83ffcbe55a..fe4aadc9630f 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -38,7 +38,6 @@ static struct strlist *dso_list, *comm_list, *sym_list; static int force; static int input; -static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; static int full_paths; static int show_nr_samples; @@ -600,7 +599,6 @@ static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; - int show = 0; struct symbol *sym = NULL; struct thread *thread; u64 ip = event->ip.ip; @@ -657,42 +655,35 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; if (cpumode == PERF_RECORD_MISC_KERNEL) { - show = SHOW_KERNEL; level = 'k'; - sym = kernel_maps__find_symbol(ip, &map); dump_printf(" ...... dso: %s\n", map ? map->dso->long_name : ""); } else if (cpumode == PERF_RECORD_MISC_USER) { - - show = SHOW_USER; level = '.'; sym = resolve_symbol(thread, &map, &ip); } else { - show = SHOW_HV; level = 'H'; - dump_printf(" ...... dso: [hypervisor]\n"); } - if (show & show_mask) { - if (dso_list && - (!map || !map->dso || - !(strlist__has_entry(dso_list, map->dso->short_name) || - (map->dso->short_name != map->dso->long_name && - strlist__has_entry(dso_list, map->dso->long_name))))) - return 0; + if (dso_list && + (!map || !map->dso || + !(strlist__has_entry(dso_list, map->dso->short_name) || + (map->dso->short_name != map->dso->long_name && + strlist__has_entry(dso_list, map->dso->long_name))))) + return 0; - if (sym_list && sym && !strlist__has_entry(sym_list, sym->name)) - return 0; + if (sym_list && sym && !strlist__has_entry(sym_list, sym->name)) + return 0; - if (hist_entry__add(thread, map, sym, ip, - chain, level, period)) { - eprintf("problem incrementing symbol count, skipping event\n"); - return -1; - } + if (hist_entry__add(thread, map, sym, ip, + chain, level, period)) { + eprintf("problem incrementing symbol count, skipping event\n"); + return -1; } + total += period; return 0; diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index a39520e6ae8f..c2e62be62798 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -6,12 +6,6 @@ #include #include -enum { - SHOW_KERNEL = 1, - SHOW_USER = 2, - SHOW_HV = 4, -}; - /* * PERF_SAMPLE_IP | PERF_SAMPLE_TID | * */ -- cgit v1.2.3 From 5c2068059a0e852f72b7c2608d92170b752d821f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 5 Oct 2009 14:26:15 -0300 Subject: perf top: Keep the default of asking for kernel module symbols MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index befef842757e..34d48c1b7a8b 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -804,10 +804,8 @@ static int 
symbol_filter(struct map *map, struct symbol *sym) static int parse_symbols(void) { - int use_modules = vmlinux_name ? 1 : 0; - if (dsos__load_kernel(vmlinux_name, sizeof(struct sym_entry), - symbol_filter, verbose, use_modules) <= 0) + symbol_filter, verbose, 1) <= 0) return -1; if (dump_symtab) -- cgit v1.2.3 From af427bf529c5991be8d1a36f43e2d0141f532f63 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 5 Oct 2009 14:26:17 -0300 Subject: perf tools: Create maps for modules when processing kallsyms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So that we get kallsyms processing closer to vmlinux + modules symtabs processing. One change in behaviour is that, just as -m must be used to ask for modules when --vmlinux is specified, it is now also required for kallsyms. Also continue if one manages to load the vmlinux data but module processing fails, so that at least some analysis can be done with part of the needed symbols. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 161 +++++++++++++++++++++++++++++++++++------------ tools/perf/util/symbol.h | 2 - 2 files changed, 122 insertions(+), 41 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index e88296899470..4dfdefd5ec7e 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -27,6 +27,44 @@ enum dso_origin { static void dsos__add(struct dso *dso); static struct dso *dsos__find(const char *name); +static struct rb_root kernel_maps; + +static void dso__set_symbols_end(struct dso *self) +{ + struct rb_node *nd, *prevnd = rb_first(&self->syms); + + if (prevnd == NULL) + return; + + for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { + struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node), + *curr = rb_entry(nd, struct symbol, rb_node); + + if (prev->end == prev->start) + prev->end = curr->start - 1; + prevnd = nd; + } +} + +static void kernel_maps__fixup_sym_end(void) +{ + struct map *prev, *curr; + struct rb_node *nd, *prevnd = rb_first(&kernel_maps); + + if (prevnd == NULL) + return; + + curr = rb_entry(prevnd, struct map, rb_node); + dso__set_symbols_end(curr->dso); + + for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { + prev = curr; + curr = rb_entry(nd, struct map, rb_node); + prev->end = curr->start - 1; + dso__set_symbols_end(curr->dso); + } +} + static struct symbol *symbol__new(u64 start, u64 len, const char *name, unsigned int priv_size, int v) { @@ -162,10 +200,9 @@ size_t dso__fprintf(struct dso *self, FILE *fp) return ret; } -static int dso__load_kallsyms(struct dso *self, struct map *map, - symbol_filter_t filter, int v) +static int maps__load_kallsyms(symbol_filter_t filter, int use_modules, int v) { - struct rb_node *nd, *prevnd; + struct map *map = kernel_map; char *line = NULL; size_t n; FILE *file = fopen("/proc/kallsyms", "r"); @@ -179,6 +216,7 @@ static int dso__load_kallsyms(struct dso *self, struct map *map, struct symbol *sym; int line_len, len; char symbol_type; + char *module, *symbol_name; line_len = getline(&line, &n, file); if (line_len < 0) @@ -201,40 +239,50 @@ static int dso__load_kallsyms(struct dso *self, struct map *map, */ if (symbol_type != 'T' && symbol_type != 'W') continue; + + symbol_name = line + len + 2; + module = strchr(symbol_name, '\t'); + if (module) { + char *module_name_end; + + if (!use_modules) + continue; + *module = '\0'; + module = strchr(module + 1, 
'['); + if (!module) + continue; + module_name_end = strchr(module + 1, ']'); + if (!module_name_end) + continue; + *(module_name_end + 1) = '\0'; + if (strcmp(map->dso->name, module)) { + map = kernel_maps__find_by_dso_name(module); + if (!map) { + fputs("/proc/{kallsyms,modules} " + "inconsistency!\n", stderr); + return -1; + } + } + start = map->map_ip(map, start); + } else + map = kernel_map; /* * Well fix up the end later, when we have all sorted. */ - sym = symbol__new(start, 0xdead, line + len + 2, - self->sym_priv_size, v); + sym = symbol__new(start, 0, symbol_name, + map->dso->sym_priv_size, v); if (sym == NULL) goto out_delete_line; if (filter && filter(map, sym)) - symbol__delete(sym, self->sym_priv_size); + symbol__delete(sym, map->dso->sym_priv_size); else { - dso__insert_symbol(self, sym); + dso__insert_symbol(map->dso, sym); count++; } } - /* - * Now that we have all sorted out, just set the ->end of all - * symbols - */ - prevnd = rb_first(&self->syms); - - if (prevnd == NULL) - goto out_delete_line; - - for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { - struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node), - *curr = rb_entry(nd, struct symbol, rb_node); - - prev->end = curr->start - 1; - prevnd = nd; - } - free(line); fclose(file); @@ -246,6 +294,24 @@ out_failure: return -1; } +static size_t kernel_maps__fprintf(FILE *fp) +{ + size_t printed = fprintf(stderr, "Kernel maps:\n"); + struct rb_node *nd; + + printed += map__fprintf(kernel_map, fp); + printed += dso__fprintf(kernel_map->dso, fp); + + for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) { + struct map *pos = rb_entry(nd, struct map, rb_node); + + printed += map__fprintf(pos, fp); + printed += dso__fprintf(pos->dso, fp); + } + + return printed + fprintf(stderr, "END kernel maps\n"); +} + static int dso__load_perf_map(struct dso *self, struct map *map, symbol_filter_t filter, int v) { @@ -598,6 +664,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, char *demangled; int is_label = elf_sym__is_label(&sym); const char *section_name; + u64 sh_offset = 0; if (!is_label && !elf_sym__is_function(&sym)) continue; @@ -613,14 +680,18 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, section_name = elf_sec__name(&shdr, secstrs); + if ((kernel || kmodule)) { + if (strstr(section_name, ".init")) + sh_offset = shdr.sh_offset; + } + if (self->adjust_symbols) { if (v >= 2) printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n", (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); sym.st_value -= shdr.sh_addr - shdr.sh_offset; - } else if (kmodule) - sym.st_value += shdr.sh_offset; + } /* * We need to figure out if the object was created from C++ sources * DWARF DW_compile_unit has this, but we don't always have access @@ -631,7 +702,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, if (demangled != NULL) elf_name = demangled; - f = symbol__new(sym.st_value, sym.st_size, elf_name, + f = symbol__new(sym.st_value + sh_offset, sym.st_size, elf_name, self->sym_priv_size, v); free(demangled); if (!f) @@ -804,7 +875,6 @@ out: return ret; } -static struct rb_root kernel_maps; struct map *kernel_map; static void kernel_maps__insert(struct map *map) @@ -975,8 +1045,7 @@ static struct map *map__new2(u64 start, struct dso *dso) return self; } -int dsos__load_modules(unsigned int sym_priv_size, - symbol_filter_t filter, int v) +static int dsos__load_modules(unsigned int sym_priv_size) { char *line = NULL; 
size_t n; @@ -1034,8 +1103,7 @@ int dsos__load_modules(unsigned int sym_priv_size, free(line); fclose(file); - v = 1; - return dsos__load_modules_sym(filter, v); + return 0; out_delete_line: free(line); @@ -1075,25 +1143,37 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, kernel_map->map_ip = vdso__map_ip; + if (use_modules && dsos__load_modules(sym_priv_size) < 0) { + fprintf(stderr, "Failed to load list of modules in use! " + "Continuing...\n"); + use_modules = 0; + } + if (vmlinux) { err = dso__load_vmlinux(dso, kernel_map, vmlinux, filter, v); if (err > 0 && use_modules) { - int syms = dsos__load_modules(sym_priv_size, filter, v); + int syms = dsos__load_modules_sym(filter, v); - if (syms < 0) { - fprintf(stderr, "dsos__load_modules failed!\n"); - return syms; - } - err += syms; + if (syms < 0) + fprintf(stderr, "Failed to read module symbols!" + " Continuing...\n"); + else + err += syms; } } if (err <= 0) - err = dso__load_kallsyms(dso, kernel_map, filter, v); + err = maps__load_kallsyms(filter, use_modules, v); if (err > 0) { struct rb_node *node = rb_first(&dso->syms); struct symbol *sym = rb_entry(node, struct symbol, rb_node); + /* + * Now that we have all sorted out, just set the ->end of all + * symbols that still don't have it. + */ + dso__set_symbols_end(dso); + kernel_maps__fixup_sym_end(); kernel_map->start = sym->start; node = rb_last(&dso->syms); @@ -1106,6 +1186,9 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, * kernel_maps__insert(kernel_map) */ dsos__add(dso); + + if (v > 0) + kernel_maps__fprintf(stderr); } return err; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 5339fd82ec96..2e4522edeb07 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -70,8 +70,6 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip); int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, symbol_filter_t filter, int verbose, int modules); -int dsos__load_modules(unsigned int sym_priv_size, symbol_filter_t filter, - int verbose); int dso__load(struct dso *self, struct map *map, symbol_filter_t filter, int verbose); struct dso *dsos__findnew(const char *name); -- cgit v1.2.3 From a2a99e8e12798706ec1026e5d8fc36f7c86122ce Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 5 Oct 2009 14:26:18 -0300 Subject: perf tools: /proc/modules names don't always match its name MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit $ cut -d' ' -f1 /proc/modules|grep _|wc -l 29 $ cut -d' ' -f1 /proc/modules|grep _|sed 's/$/.ko'/g|while read n;do find /lib/modules/`uname -r` -name $n;done|wc -l 12 For instance: $ grep ^aes_x86 /proc/modules aes_x86_64 9056 2 - Live 0xffffffffa0091000 $ l /lib/modules/2.6.31-tip/kernel/arch/x86/crypto/aes-x86_64.ko -rw-r--r-- 1 root root 136438 2009-09-22 19:05 /lib/modules/2.6.31-tip/kernel/arch/x86/crypto/aes-x86_64.ko Handle that by introducing a strxfrchar routine that replaces dashes with underscores when matching file names to loaded modules. 
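A self-contained usage sketch of the new helper (the declaration is the one added to util/string.h below; the module name is the aes example above):

    #include <stdio.h>
    #include "util/string.h"   /* char *strxfrchar(char *s, char from, char to); */

    int main(void)
    {
            char dso_name[] = "[aes-x86_64]";

            /* Normalize the .ko file name spelling to the /proc/modules one. */
            printf("%s\n", strxfrchar(dso_name, '-', '_'));  /* prints: [aes_x86_64] */
            return 0;
    }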
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/util/string.c | 11 +++++++++++ tools/perf/util/string.h | 1 + tools/perf/util/symbol.c | 3 ++- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index c93eca9a7be3..04743d3e9039 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c @@ -1,3 +1,4 @@ +#include #include "string.h" static int hex(char ch) @@ -32,3 +33,13 @@ int hex2u64(const char *ptr, u64 *long_val) return p - ptr; } + +char *strxfrchar(char *s, char from, char to) +{ + char *p = s; + + while ((p = strchr(p, from)) != NULL) + *p++ = to; + + return s; +} diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h index 15c827475e7d..2c84bf65ba0f 100644 --- a/tools/perf/util/string.h +++ b/tools/perf/util/string.h @@ -4,6 +4,7 @@ #include "types.h" int hex2u64(const char *ptr, u64 *val); +char *strxfrchar(char *s, char from, char to); #define _STR(x) #x #define STR(x) _STR(x) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 4dfdefd5ec7e..e3eebdd682d9 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -189,7 +189,7 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip) size_t dso__fprintf(struct dso *self, FILE *fp) { - size_t ret = fprintf(fp, "dso: %s\n", self->long_name); + size_t ret = fprintf(fp, "dso: %s\n", self->short_name); struct rb_node *nd; for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) { @@ -977,6 +977,7 @@ static int dsos__load_modules_sym_dir(char *dirname, snprintf(dso_name, sizeof(dso_name), "[%.*s]", (int)(dot - dent->d_name), dent->d_name); + strxfrchar(dso_name, '-', '_'); map = kernel_maps__find_by_dso_name(dso_name); if (map == NULL) continue; -- cgit v1.2.3 From c3b32fcbc7f4fd9a9b84718b991b175b0fd53f8c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 5 Oct 2009 14:26:16 -0300 Subject: perf report: Use kernel_maps__find_symbol as fallback to find vdsos, etc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In resolve_symbol, as we're moving to breaking the kernel symbols list per address ranges, i.e. kernel linking sections, so that we don't have a big kernel_map that in its range covers what is in the modules. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-report.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index fe4aadc9630f..12f8c868fcd7 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -384,11 +384,8 @@ got_map: * the "[vdso]" dso, but for now lets use the old * trick of looking in the whole kernel symbol list. */ - if ((long long)ip < 0) { - map = kernel_map; - if (mapp) - *mapp = map; - } + if ((long long)ip < 0) + return kernel_maps__find_symbol(ip, mapp); } dump_printf(" ...... dso: %s\n", map ? map->dso->long_name : ""); -- cgit v1.2.3 From 26a50744b21fff65bd754874072857bee8967f4d Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 6 Oct 2009 01:09:50 -0500 Subject: tracing/events: Add 'signed' field to format files The sign info used for filters in the kernel is also useful to applications that process the trace stream. Add it to the format files and make it available to userspace. 
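Each field line in a format file now carries a signed: attribute next to offset and size, so a reader no longer has to guess signedness from the type string. A hedged userspace sketch of consuming it (only the signed:%u syntax comes from this patch; the sample field line and its offset are illustrative and vary by kernel):

    #include <stdio.h>
    #include <string.h>

    /* Return 1/0 from the signed: attribute, or -1 on kernels that predate it. */
    static int field_is_signed(const char *line)
    {
            const char *p = strstr(line, "signed:");

            return p ? p[7] - '0' : -1;
    }

    int main(void)
    {
            const char *line =
                    "\tfield:int common_pid;\toffset:4;\tsize:4;\tsigned:1;";

            printf("signed = %d\n", field_is_signed(line));  /* signed = 1 */
            return 0;
    }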
Signed-off-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: rostedt@goodmis.org Cc: lizf@cn.fujitsu.com Cc: hch@infradead.org Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1254809398-8078-2-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- include/trace/ftrace.h | 15 +++++++++------ kernel/trace/ring_buffer.c | 15 +++++++++------ kernel/trace/trace_events.c | 24 ++++++++++++------------ kernel/trace/trace_export.c | 25 ++++++++++++++----------- kernel/trace/trace_syscalls.c | 20 +++++++++++++------- tools/perf/util/trace-event-parse.c | 24 ++++++++++++++++++++++++ tools/perf/util/trace-event.h | 1 + 7 files changed, 82 insertions(+), 42 deletions(-) diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index cc0d9667e182..c9bbcab95fbe 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -120,9 +120,10 @@ #undef __field #define __field(type, item) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ - "offset:%u;\tsize:%u;\n", \ + "offset:%u;\tsize:%u;\tsigned:%u;\n", \ (unsigned int)offsetof(typeof(field), item), \ - (unsigned int)sizeof(field.item)); \ + (unsigned int)sizeof(field.item), \ + (unsigned int)is_signed_type(type)); \ if (!ret) \ return 0; @@ -132,19 +133,21 @@ #undef __array #define __array(type, item, len) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ - "offset:%u;\tsize:%u;\n", \ + "offset:%u;\tsize:%u;\tsigned:%u;\n", \ (unsigned int)offsetof(typeof(field), item), \ - (unsigned int)sizeof(field.item)); \ + (unsigned int)sizeof(field.item), \ + (unsigned int)is_signed_type(type)); \ if (!ret) \ return 0; #undef __dynamic_array #define __dynamic_array(type, item, len) \ ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\ - "offset:%u;\tsize:%u;\n", \ + "offset:%u;\tsize:%u;\tsigned:%u;\n", \ (unsigned int)offsetof(typeof(field), \ __data_loc_##item), \ - (unsigned int)sizeof(field.__data_loc_##item)); \ + (unsigned int)sizeof(field.__data_loc_##item), \ + (unsigned int)is_signed_type(type)); \ if (!ret) \ return 0; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index d4ff01970547..e43c928356ee 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -397,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s) int ret; ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" - "offset:0;\tsize:%u;\n", - (unsigned int)sizeof(field.time_stamp)); + "offset:0;\tsize:%u;\tsigned:%u;\n", + (unsigned int)sizeof(field.time_stamp), + (unsigned int)is_signed_type(u64)); ret = trace_seq_printf(s, "\tfield: local_t commit;\t" - "offset:%u;\tsize:%u;\n", + "offset:%u;\tsize:%u;\tsigned:%u;\n", (unsigned int)offsetof(typeof(field), commit), - (unsigned int)sizeof(field.commit)); + (unsigned int)sizeof(field.commit), + (unsigned int)is_signed_type(long)); ret = trace_seq_printf(s, "\tfield: char data;\t" - "offset:%u;\tsize:%u;\n", + "offset:%u;\tsize:%u;\tsigned:%u;\n", (unsigned int)offsetof(typeof(field), data), - (unsigned int)BUF_PAGE_SIZE); + (unsigned int)BUF_PAGE_SIZE, + (unsigned int)is_signed_type(char)); return ret; } diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index d128f65778e6..cf3cabf6ce14 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -507,7 +507,7 @@ extern char *__bad_type_size(void); #define FIELD(type, name) \ sizeof(type) != sizeof(field.name) ? 
__bad_type_size() : \ #type, "common_" #name, offsetof(typeof(field), name), \ - sizeof(field.name) + sizeof(field.name), is_signed_type(type) static int trace_write_header(struct trace_seq *s) { @@ -515,17 +515,17 @@ static int trace_write_header(struct trace_seq *s) /* struct trace_entry */ return trace_seq_printf(s, - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\n", - FIELD(unsigned short, type), - FIELD(unsigned char, flags), - FIELD(unsigned char, preempt_count), - FIELD(int, pid), - FIELD(int, lock_depth)); + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\n", + FIELD(unsigned short, type), + FIELD(unsigned char, flags), + FIELD(unsigned char, preempt_count), + FIELD(int, pid), + FIELD(int, lock_depth)); } static ssize_t diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 9753fcc61bc5..31da218ee10f 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -66,44 +66,47 @@ static void __used ____ftrace_check_##name(void) \ #undef __field #define __field(type, item) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ - "offset:%zu;\tsize:%zu;\n", \ + "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ offsetof(typeof(field), item), \ - sizeof(field.item)); \ + sizeof(field.item), is_signed_type(type)); \ if (!ret) \ return 0; #undef __field_desc #define __field_desc(type, container, item) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ - "offset:%zu;\tsize:%zu;\n", \ + "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ offsetof(typeof(field), container.item), \ - sizeof(field.container.item)); \ + sizeof(field.container.item), \ + is_signed_type(type)); \ if (!ret) \ return 0; #undef __array #define __array(type, item, len) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ - "offset:%zu;\tsize:%zu;\n", \ - offsetof(typeof(field), item), \ - sizeof(field.item)); \ + "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ + offsetof(typeof(field), item), \ + sizeof(field.item), is_signed_type(type)); \ if (!ret) \ return 0; #undef __array_desc #define __array_desc(type, container, item, len) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ - "offset:%zu;\tsize:%zu;\n", \ + "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ offsetof(typeof(field), container.item), \ - sizeof(field.container.item)); \ + sizeof(field.container.item), \ + is_signed_type(type)); \ if (!ret) \ return 0; #undef __dynamic_array #define __dynamic_array(type, item) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ - "offset:%zu;\tsize:0;\n", \ - offsetof(typeof(field), item)); \ + "offset:%zu;\tsize:0;\tsigned:%u;\n", \ + offsetof(typeof(field), item), \ + is_signed_type(type)); \ if (!ret) \ return 0; diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 527e17eae575..d99abc427c39 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -103,7 +103,8 @@ extern char *__bad_type_size(void); #define SYSCALL_FIELD(type, name) \ sizeof(type) != sizeof(trace.name) ? 
\ __bad_type_size() : \ - #type, #name, offsetof(typeof(trace), name), sizeof(trace.name) + #type, #name, offsetof(typeof(trace), name), \ + sizeof(trace.name), is_signed_type(type) int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) { @@ -120,7 +121,8 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) if (!entry) return 0; - ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", + ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" + "\tsigned:%u;\n", SYSCALL_FIELD(int, nr)); if (!ret) return 0; @@ -130,8 +132,10 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) entry->args[i]); if (!ret) return 0; - ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;\n", offset, - sizeof(unsigned long)); + ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;" + "\tsigned:%u;\n", offset, + sizeof(unsigned long), + is_signed_type(unsigned long)); if (!ret) return 0; offset += sizeof(unsigned long); @@ -163,8 +167,10 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s) struct syscall_trace_exit trace; ret = trace_seq_printf(s, - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" + "\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" + "\tsigned:%u;\n", SYSCALL_FIELD(int, nr), SYSCALL_FIELD(long, ret)); if (!ret) @@ -212,7 +218,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call) if (ret) return ret; - ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0, + ret = trace_define_field(call, SYSCALL_FIELD(long, ret), FILTER_OTHER); return ret; diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 55b41b9e3834..be8412d699a1 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -894,6 +894,21 @@ static int event_read_fields(struct event *event, struct format_field **fields) field->size = strtoul(token, NULL, 0); free_token(token); + if (read_expected(EVENT_OP, (char *)";") < 0) + goto fail_expect; + + if (read_expected(EVENT_ITEM, (char *)"signed") < 0) + goto fail_expect; + + if (read_expected(EVENT_OP, (char *)":") < 0) + goto fail_expect; + + if (read_expect_type(EVENT_ITEM, &token)) + goto fail; + if (strtoul(token, NULL, 0)) + field->flags |= FIELD_IS_SIGNED; + free_token(token); + if (read_expected(EVENT_OP, (char *)";") < 0) goto fail_expect; @@ -2843,6 +2858,15 @@ static void parse_header_field(char *type, return; *size = atoi(token); free_token(token); + if (read_expected(EVENT_OP, (char *)";") < 0) + return; + if (read_expected(EVENT_ITEM, (char *)"signed") < 0) + return; + if (read_expected(EVENT_OP, (char *)":") < 0) + return; + if (read_expect_type(EVENT_ITEM, &token) < 0) + return; + free_token(token); if (read_expected(EVENT_OP, (char *)";") < 0) return; if (read_expect_type(EVENT_NEWLINE, &token) < 0) diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 162c3e6deb93..00b440df66d8 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -26,6 +26,7 @@ enum { enum format_flags { FIELD_IS_ARRAY = 1, FIELD_IS_POINTER = 2, + FIELD_IS_SIGNED = 4, }; struct format_field { -- cgit v1.2.3 From 2774601811bedd04ee7e38624343ea80b4a62d7e Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 6 Oct 2009 01:09:51 -0500 Subject: perf trace: Add subsystem string to struct event Needed to fully qualify event names for event stream processing. 
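For illustration, a hedged sketch of how a stream-processing consumer could build such a fully qualified name from the new field (the struct is cut down to the two members used here and the helper is hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Reduced stand-in for perf's struct event: only the two fields used. */
struct event {
	char *name;
	char *system;
};

/* Hypothetical consumer helper: build "system:name", the fully
 * qualified key a stream processor can match on. Caller frees it. */
static char *event_qualified_name(const struct event *ev)
{
	size_t len = strlen(ev->system) + strlen(ev->name) + 2;
	char *buf = malloc(len);

	if (buf)
		snprintf(buf, len, "%s:%s", ev->system, ev->name);
	return buf;
}

int main(void)
{
	struct event ev = { .name = (char *)"sched_switch",
			    .system = (char *)"sched" };
	char *key = event_qualified_name(&ev);

	if (key) {
		puts(key);	/* prints "sched:sched_switch" */
		free(key);
	}
	return 0;
}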
Signed-off-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: rostedt@goodmis.org Cc: lizf@cn.fujitsu.com Cc: hch@infradead.org Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1254809398-8078-3-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 4 +++- tools/perf/util/trace-event.h | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index be8412d699a1..de3fc8bf8bfe 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -2950,7 +2950,7 @@ int parse_ftrace_file(char *buf, unsigned long size) return 0; } -int parse_event_file(char *buf, unsigned long size, char *system__unused __unused) +int parse_event_file(char *buf, unsigned long size, char *sys) { struct event *event; int ret; @@ -2977,6 +2977,8 @@ int parse_event_file(char *buf, unsigned long size, char *system__unused __unuse if (ret < 0) die("failed to read event print fmt"); + event->system = strdup(sys); + #define PRINT_ARGS 0 if (PRINT_ARGS && event->print_fmt.args) print_args(event->print_fmt.args); diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 00b440df66d8..cb92978be300 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -133,6 +133,7 @@ struct event { int flags; struct format format; struct print_fmt print_fmt; + char *system; }; enum { @@ -167,7 +168,7 @@ void print_funcs(void); void print_printk(void); int parse_ftrace_file(char *buf, unsigned long size); -int parse_event_file(char *buf, unsigned long size, char *system); +int parse_event_file(char *buf, unsigned long size, char *sys); void print_event(int cpu, void *data, int size, unsigned long long nsecs, char *comm); -- cgit v1.2.3 From 064739bc4b3d7f424b2f25547e6611bcf0132415 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 6 Oct 2009 01:09:52 -0500 Subject: perf trace: Add string/dynamic cases to format_flags Needed for distinguishing string fields in event stream processing. 
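A minimal sketch of how a consumer might branch on the new bits (the flag values mirror the enum additions below; the helper itself is hypothetical):

#include <stdio.h>

/* Values mirror the enum format_flags additions in this patch. */
#define FIELD_IS_SIGNED		4
#define FIELD_IS_STRING		8
#define FIELD_IS_DYNAMIC	16

/* Hypothetical helper: pick a rendering strategy from the flags. */
static const char *render_hint(unsigned int flags)
{
	if (flags & FIELD_IS_DYNAMIC)
		return "string stored via __data_loc offset/length";
	if (flags & FIELD_IS_STRING)
		return "inline char array";
	if (flags & FIELD_IS_SIGNED)
		return "signed integer";
	return "unsigned integer";
}

int main(void)
{
	printf("%s\n", render_hint(FIELD_IS_STRING | FIELD_IS_DYNAMIC));
	return 0;
}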
Signed-off-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: rostedt@goodmis.org Cc: lizf@cn.fujitsu.com Cc: hch@infradead.org Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1254809398-8078-4-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 24 ++++++++++++++++++++++++ tools/perf/util/trace-event.h | 2 ++ 2 files changed, 26 insertions(+) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index de3fc8bf8bfe..6f851f98b5b4 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -721,6 +721,24 @@ static int event_read_id(void) return -1; } +static int field_is_string(struct format_field *field) +{ + if ((field->flags & FIELD_IS_ARRAY) && + (!strstr(field->type, "char") || !strstr(field->type, "u8") || + !strstr(field->type, "s8"))) + return 1; + + return 0; +} + +static int field_is_dynamic(struct format_field *field) +{ + if (!strcmp(field->type, "__data_loc")) + return 1; + + return 0; +} + static int event_read_fields(struct event *event, struct format_field **fields) { struct format_field *field = NULL; @@ -865,6 +883,12 @@ static int event_read_fields(struct event *event, struct format_field **fields) free(brackets); } + if (field_is_string(field)) { + field->flags |= FIELD_IS_STRING; + if (field_is_dynamic(field)) + field->flags |= FIELD_IS_DYNAMIC; + } + if (test_type_token(type, token, EVENT_OP, (char *)";")) goto fail; free_token(token); diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index cb92978be300..5f59a39fb88b 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -27,6 +27,8 @@ enum format_flags { FIELD_IS_ARRAY = 1, FIELD_IS_POINTER = 2, FIELD_IS_SIGNED = 4, + FIELD_IS_STRING = 8, + FIELD_IS_DYNAMIC = 16, }; struct format_field { -- cgit v1.2.3 From 42e59d7d19dc4b49feab2a860fd9a8ca3248c833 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 6 Oct 2009 15:14:21 +0200 Subject: perf tools: Default to 1 KHz auto-sampling freq events Use auto-freq events by default in perf record and perf top. This allows more consistent hardware event sampling, regardless of the intensity of the underlying event. It also keeps us from over-sampling on larger/busier systems. 
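In perf_event_attr terms the new default asks for a sampling frequency instead of a fixed period; a hedged sketch of that setup (Linux-only, assuming the usual linux/perf_event.h field names):

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the new default: let the kernel auto-adjust the sample
 * period so samples arrive at roughly 1000 Hz per event. */
static void attr_set_default_freq(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->freq = 1;			/* sample_freq is a frequency ... */
	attr->sample_freq = 1000;	/* ... of 1 KHz, not a period    */
}

int main(void)
{
	struct perf_event_attr attr;

	attr_set_default_freq(&attr);
	printf("freq=%llu Hz\n", (unsigned long long)attr.sample_freq);
	return 0;
}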
(also make surrounding initializations more consistent) Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 52 ++++++++++++++++++++++----------------------- tools/perf/builtin-top.c | 38 ++++++++++++++++----------------- 2 files changed, 45 insertions(+), 45 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 3eeef339c787..494f8c7d7521 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -29,43 +29,43 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static long default_interval = 100000; -static int nr_cpus = 0; +static int nr_cpus = 0; static unsigned int page_size; -static unsigned int mmap_pages = 128; -static int freq = 0; +static unsigned int mmap_pages = 128; +static int freq = 1000; static int output; static const char *output_name = "perf.data"; -static int group = 0; -static unsigned int realtime_prio = 0; -static int raw_samples = 0; -static int system_wide = 0; -static int profile_cpu = -1; -static pid_t target_pid = -1; -static pid_t child_pid = -1; -static int inherit = 1; -static int force = 0; -static int append_file = 0; -static int call_graph = 0; -static int inherit_stat = 0; -static int no_samples = 0; -static int sample_address = 0; -static int multiplex = 0; -static int multiplex_fd = -1; - -static long samples; +static int group = 0; +static unsigned int realtime_prio = 0; +static int raw_samples = 0; +static int system_wide = 0; +static int profile_cpu = -1; +static pid_t target_pid = -1; +static pid_t child_pid = -1; +static int inherit = 1; +static int force = 0; +static int append_file = 0; +static int call_graph = 0; +static int inherit_stat = 0; +static int no_samples = 0; +static int sample_address = 0; +static int multiplex = 0; +static int multiplex_fd = -1; + +static long samples = 0; static struct timeval last_read; static struct timeval this_read; -static u64 bytes_written; +static u64 bytes_written = 0; static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; -static int nr_poll; -static int nr_cpu; +static int nr_poll = 0; +static int nr_cpu = 0; -static int file_new = 1; +static int file_new = 1; -struct perf_header *header; +struct perf_header *header = NULL; struct mmap_data { int counter; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index c574c5b3d0e6..d978dc99236c 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -55,26 +55,26 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS]; -static int system_wide = 0; +static int system_wide = 0; static int default_interval = 100000; -static int count_filter = 5; -static int print_entries = 15; +static int count_filter = 5; +static int print_entries = 15; -static int target_pid = -1; -static int inherit = 0; -static int profile_cpu = -1; -static int nr_cpus = 0; -static unsigned int realtime_prio = 0; -static int group = 0; +static int target_pid = -1; +static int inherit = 0; +static int profile_cpu = -1; +static int nr_cpus = 0; +static unsigned int realtime_prio = 0; +static int group = 0; static unsigned int page_size; -static unsigned int mmap_pages = 16; -static int freq = 0; +static unsigned int mmap_pages = 16; +static int freq = 1000; /* 1 KHz */ -static int delay_secs = 2; -static int zero; -static int dump_symtab; +static int delay_secs = 2; +static int zero = 0; +static int dump_symtab = 0; /* * Source @@ -87,11 +87,11 @@ struct source_line { struct source_line *next; 
}; -static char *sym_filter = NULL; -struct sym_entry *sym_filter_entry = NULL; -static int sym_pcnt_filter = 5; -static int sym_counter = 0; -static int display_weighted = -1; +static char *sym_filter = NULL; +struct sym_entry *sym_filter_entry = NULL; +static int sym_pcnt_filter = 5; +static int sym_counter = 0; +static int display_weighted = -1; /* * Symbols -- cgit v1.2.3 From b209aa1f83964d49a332a7b6b818ebede5cdc6ef Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 6 Oct 2009 21:21:26 +0200 Subject: perf tools: Start the perf.data mapping at data offset in perf trace Currently, we are mapping perf.data in the beginning of the file and use the data offset as a buffer offset. This may exceed the mapping area if the data offset is upper than page_size * mmap_window and result in a page fault (thing that happen if we merge trace.info in perf.data). Instead, let's start the mapping in the page that matches our data offset. v2: Drop a junk from another patch (trace_report() removal) Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Tom Zanussi LKML-Reference: <1254856886-10348-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-trace.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 5d4c84d86373..d573d4ea6c21 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -143,6 +143,7 @@ static int __cmd_trace(void) int ret, rc = EXIT_FAILURE; unsigned long offset = 0; unsigned long head = 0; + unsigned long shift; struct stat perf_stat; event_t *event; uint32_t size; @@ -180,6 +181,10 @@ static int __cmd_trace(void) return EXIT_FAILURE; } + shift = page_size * (head / page_size); + offset += shift; + head -= shift; + remap: buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, MAP_SHARED, input, offset); @@ -192,9 +197,9 @@ more: event = (event_t *)(buf + head); if (head + event->header.size >= page_size * mmap_window) { - unsigned long shift = page_size * (head / page_size); int res; + shift = page_size * (head / page_size); res = munmap(buf, page_size * mmap_window); assert(res == 0); -- cgit v1.2.3 From 03456a158d9067d2f657bec170506009db81756d Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 6 Oct 2009 23:36:47 +0200 Subject: perf tools: Merge trace.info content into perf.data This drops the trace.info file and move its contents into the common perf.data file. This is done by creating a new trace_info section into this file. A user of perf headers needs to call perf_header__set_trace_info() to save the trace meta informations into the perf.data file. A file created by perf after his patch is unsupported by previous version because the size of the headers have increased. That said, it's two new fields that have been added in the end of the headers, and those could be ignored by previous versions if they just handled the dynamic header size and then ignore the unknow part. The offsets guarantee the compatibility. We'll do a -stable fix for that. But current previous versions handle the header size using its static size, not dynamic, then it's not backward compatible with trace records. 
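For illustration, a hedged sketch of reading the new section's extent back out of perf.data (the leading header members and all names here are approximations; only the appended trace_info offset/size pair is the point of this patch):

#include <stdio.h>
#include <stdint.h>

struct file_section {
	uint64_t offset;
	uint64_t size;
};

/* Roughly mirrors struct perf_file_header after this patch; the earlier
 * members are sketched from memory and may not match exactly. */
struct file_header {
	uint64_t magic;
	uint64_t size;
	uint64_t attr_size;
	struct file_section attrs;
	struct file_section data;
	struct file_section event_types;
	struct file_section trace_info;	/* new: 0/0 when no trace data */
};

static int print_trace_info_extent(const char *path)
{
	struct file_header h;
	FILE *f = fopen(path, "rb");

	if (!f || fread(&h, sizeof(h), 1, f) != 1) {
		if (f)
			fclose(f);
		return -1;
	}
	printf("trace_info: offset=%llu size=%llu\n",
	       (unsigned long long)h.trace_info.offset,
	       (unsigned long long)h.trace_info.size);
	fclose(f);
	return 0;
}

int main(void)
{
	return print_trace_info_extent("perf.data") ? 1 : 0;
}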
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Mike Galbraith Cc: Paul Mackerras LKML-Reference: <20091006213643.GA5343@nowhere> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 7 +++---- tools/perf/builtin-sched.c | 1 - tools/perf/builtin-trace.c | 1 - tools/perf/util/header.c | 42 ++++++++++++++++++++++++++++++++++++++ tools/perf/util/header.h | 4 +++- tools/perf/util/trace-event-info.c | 6 ++---- tools/perf/util/trace-event-read.c | 7 ++----- tools/perf/util/trace-event.h | 4 ++-- 8 files changed, 54 insertions(+), 18 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 494f8c7d7521..59af03d80d07 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -17,7 +17,6 @@ #include "util/header.h" #include "util/event.h" #include "util/debug.h" -#include "util/trace-event.h" #include #include @@ -566,17 +565,17 @@ static int __cmd_record(int argc, const char **argv) else header = perf_header__new(); - if (raw_samples) { - read_tracing_data(attrs, nr_counters); + perf_header__set_trace_info(); } else { for (i = 0; i < nr_counters; i++) { if (attrs[i].sample_type & PERF_SAMPLE_RAW) { - read_tracing_data(attrs, nr_counters); + perf_header__set_trace_info(); break; } } } + atexit(atexit_header); if (!system_wide) { diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 4470f2535706..18871380b015 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1634,7 +1634,6 @@ static int read_events(void) uint32_t size; char *buf; - trace_report(); register_idle_thread(&threads, &last_match); input = open(input_name, O_RDONLY); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index d573d4ea6c21..d9abb4ae5f79 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -149,7 +149,6 @@ static int __cmd_trace(void) uint32_t size; char *buf; - trace_report(); register_idle_thread(&threads, &last_match); input = open(input_name, O_RDONLY); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index e306857b2c2b..212fade7ee74 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -5,6 +5,8 @@ #include "util.h" #include "header.h" +#include "../perf.h" +#include "trace-event.h" /* * Create new perf.data header attribute: @@ -62,6 +64,8 @@ struct perf_header *perf_header__new(void) self->data_offset = 0; self->data_size = 0; + self->trace_info_offset = 0; + self->trace_info_size = 0; return self; } @@ -145,8 +149,16 @@ struct perf_file_header { struct perf_file_section attrs; struct perf_file_section data; struct perf_file_section event_types; + struct perf_file_section trace_info; }; +static int trace_info; + +void perf_header__set_trace_info(void) +{ + trace_info = 1; +} + static void do_write(int fd, void *buf, size_t size) { while (size) { @@ -198,6 +210,23 @@ void perf_header__write(struct perf_header *self, int fd) if (events) do_write(fd, events, self->event_size); + if (trace_info) { + static int trace_info_written; + + /* + * Write it only once + */ + if (!trace_info_written) { + self->trace_info_offset = lseek(fd, 0, SEEK_CUR); + read_tracing_data(fd, attrs, nr_counters); + self->trace_info_size = lseek(fd, 0, SEEK_CUR) - + self->trace_info_offset; + trace_info_written = 1; + } else { + lseek(fd, self->trace_info_offset + + self->trace_info_size, SEEK_SET); + } + } self->data_offset = lseek(fd, 0, SEEK_CUR); @@ -217,6 +246,10 @@ void perf_header__write(struct perf_header 
*self, int fd) .offset = self->event_offset, .size = self->event_size, }, + .trace_info = { + .offset = self->trace_info_offset, + .size = self->trace_info_size, + }, }; lseek(fd, 0, SEEK_SET); @@ -290,6 +323,15 @@ struct perf_header *perf_header__read(int fd) do_read(fd, events, f_header.event_types.size); event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } + + self->trace_info_offset = f_header.trace_info.offset; + self->trace_info_size = f_header.trace_info.size; + + if (self->trace_info_size) { + lseek(fd, self->trace_info_offset, SEEK_SET); + trace_report(fd); + } + self->event_offset = f_header.event_types.offset; self->event_size = f_header.event_types.size; diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index a2916b652a1b..30aee5160dc0 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -21,6 +21,8 @@ struct perf_header { u64 data_size; u64 event_offset; u64 event_size; + u64 trace_info_offset; + u64 trace_info_size; }; struct perf_header *perf_header__read(int fd); @@ -40,7 +42,7 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); u64 perf_header__sample_type(struct perf_header *header); struct perf_event_attr * perf_header__find_attr(u64 id, struct perf_header *header); - +void perf_header__set_trace_info(void); struct perf_header *perf_header__new(void); diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index af4b0573b37f..831052d4b4fb 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c @@ -496,14 +496,12 @@ get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) return path.next; } -void read_tracing_data(struct perf_event_attr *pattrs, int nb_events) +void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) { char buf[BUFSIZ]; struct tracepoint_path *tps; - output_fd = open(output_file, O_WRONLY | O_CREAT | O_TRUNC | O_LARGEFILE, 0644); - if (output_fd < 0) - die("creating file '%s'", output_file); + output_fd = fd; buf[0] = 23; buf[1] = 8; diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index 1b5c847d2c22..44292e06cca4 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c @@ -458,9 +458,8 @@ struct record *trace_read_data(int cpu) return data; } -void trace_report(void) +void trace_report(int fd) { - const char *input_file = "trace.info"; char buf[BUFSIZ]; char test[] = { 23, 8, 68 }; char *version; @@ -468,9 +467,7 @@ void trace_report(void) int show_funcs = 0; int show_printk = 0; - input_fd = open(input_file, O_RDONLY); - if (input_fd < 0) - die("opening '%s'\n", input_file); + input_fd = fd; read_or_die(buf, 3); if (memcmp(buf, test, 3) != 0) diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 5f59a39fb88b..da77e073c867 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -158,7 +158,7 @@ struct record *trace_read_data(int cpu); void parse_set_info(int nr_cpus, int long_sz); -void trace_report(void); +void trace_report(int fd); void *malloc_or_die(unsigned int size); @@ -244,6 +244,6 @@ unsigned long long raw_field_value(struct event *event, const char *name, void *data); void *raw_field_ptr(struct event *event, const char *name, void *data); -void read_tracing_data(struct perf_event_attr *pattrs, int nb_events); +void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); #endif /* __PERF_TRACE_EVENTS_H */ -- cgit v1.2.3 From 
016e92fbc9ef33689cf654f343a94383d43235e7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 7 Oct 2009 12:47:31 +0200 Subject: perf tools: Unify perf.data mapping and events handling This librarizes the perf.data file mapping and handling in various perf tools, roughly reducing the amount of code and fixing the places that mmap from beginning of the file whereas we want to mmap from the beginning of the data, leading to page fault because the mmap window is too small since the trace info are written in the file too. TODO: - convert perf timechart too Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arjan van de Ven LKML-Reference: <20091007104729.GD5043@nowhere> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 + tools/perf/builtin-report.c | 211 +++++++---------------------------------- tools/perf/builtin-sched.c | 140 ++++++---------------------- tools/perf/builtin-trace.c | 129 ++++--------------------- tools/perf/util/data_map.c | 222 ++++++++++++++++++++++++++++++++++++++++++++ tools/perf/util/data_map.h | 31 +++++++ 6 files changed, 334 insertions(+), 401 deletions(-) create mode 100644 tools/perf/util/data_map.c create mode 100644 tools/perf/util/data_map.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 5a429966c995..495eb6d97fa0 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -342,6 +342,7 @@ LIB_H += util/values.h LIB_H += util/sort.h LIB_H += util/hist.h LIB_H += util/thread.h +LIB_H += util/data_map.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -378,6 +379,7 @@ LIB_OBJS += util/trace-event-info.o LIB_OBJS += util/svghelper.o LIB_OBJS += util/sort.o LIB_OBJS += util/hist.o +LIB_OBJS += util/data_map.o BUILTIN_OBJS += builtin-annotate.o BUILTIN_OBJS += builtin-help.o diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 12f8c868fcd7..87c4582303bf 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -26,6 +26,7 @@ #include "util/parse-options.h" #include "util/parse-events.h" +#include "util/data_map.h" #include "util/thread.h" #include "util/sort.h" #include "util/hist.h" @@ -37,7 +38,6 @@ static char *dso_list_str, *comm_list_str, *sym_list_str, static struct strlist *dso_list, *comm_list, *sym_list; static int force; -static int input; static int full_paths; static int show_nr_samples; @@ -48,15 +48,11 @@ static struct perf_read_values show_threads_values; static char default_pretty_printing_style[] = "normal"; static char *pretty_printing_style = default_pretty_printing_style; -static unsigned long page_size; -static unsigned long mmap_window = 32; - static int exclude_other = 1; static char callchain_default_opt[] = "fractal,0.5"; -static char __cwd[PATH_MAX]; -static char *cwd = __cwd; +static char *cwd; static int cwdlen; static struct rb_root threads; @@ -815,208 +811,71 @@ process_read_event(event_t *event, unsigned long offset, unsigned long head) return 0; } -static int -process_event(event_t *event, unsigned long offset, unsigned long head) -{ - trace_event(event); - - switch (event->header.type) { - case PERF_RECORD_SAMPLE: - return process_sample_event(event, offset, head); - - case PERF_RECORD_MMAP: - return process_mmap_event(event, offset, head); - - case PERF_RECORD_COMM: - return process_comm_event(event, offset, head); - - case PERF_RECORD_FORK: - case PERF_RECORD_EXIT: - return process_task_event(event, offset, head); - - case PERF_RECORD_LOST: - return process_lost_event(event, 
offset, head); - - case PERF_RECORD_READ: - return process_read_event(event, offset, head); - - /* - * We dont process them right now but they are fine: - */ - - case PERF_RECORD_THROTTLE: - case PERF_RECORD_UNTHROTTLE: - return 0; - - default: - return -1; - } - - return 0; -} - -static int __cmd_report(void) +static int sample_type_check(u64 type) { - int ret, rc = EXIT_FAILURE; - unsigned long offset = 0; - unsigned long head, shift; - struct stat input_stat; - struct thread *idle; - event_t *event; - uint32_t size; - char *buf; - - idle = register_idle_thread(&threads, &last_match); - thread__comm_adjust(idle); - - if (show_threads) - perf_read_values_init(&show_threads_values); - - input = open(input_name, O_RDONLY); - if (input < 0) { - fprintf(stderr, " failed to open file: %s", input_name); - if (!strcmp(input_name, "perf.data")) - fprintf(stderr, " (try 'perf record' first)"); - fprintf(stderr, "\n"); - exit(-1); - } - - ret = fstat(input, &input_stat); - if (ret < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { - fprintf(stderr, "file: %s not owned by current user or root\n", input_name); - exit(-1); - } - - if (!input_stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - - header = perf_header__read(input); - head = header->data_offset; - - sample_type = perf_header__sample_type(header); + sample_type = type; if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { if (sort__has_parent) { fprintf(stderr, "selected --sort parent, but no" " callchain data. Did you call" " perf record without -g?\n"); - exit(-1); + return -1; } if (callchain) { fprintf(stderr, "selected -g but no callchain data." " Did you call perf record without" " -g?\n"); - exit(-1); + return -1; } } else if (callchain_param.mode != CHAIN_NONE && !callchain) { callchain = 1; if (register_callchain_param(&callchain_param) < 0) { fprintf(stderr, "Can't register callchain" " params\n"); - exit(-1); + return -1; } } - if (load_kernel() < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; - } - - if (!full_paths) { - if (getcwd(__cwd, sizeof(__cwd)) == NULL) { - perror("failed to get the current directory"); - return EXIT_FAILURE; - } - cwdlen = strlen(cwd); - } else { - cwd = NULL; - cwdlen = 0; - } - - shift = page_size * (head / page_size); - offset += shift; - head -= shift; - -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - size = event->header.size; - if (!size) - size = 8; - - if (head + event->header.size >= page_size * mmap_window) { - int munmap_ret; - - shift = page_size * (head / page_size); - - munmap_ret = munmap(buf, page_size * mmap_window); - assert(munmap_ret == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - - dump_printf("\n%p [%p]: event: %d\n", - (void *)(offset + head), - (void *)(long)event->header.size, - event->header.type); - - if (!size || process_event(event, offset, head) < 0) { - - dump_printf("%p [%p]: skipping unknown header type: %d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type); - - total_unknown++; + return 0; +} - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. 
- */ +static struct perf_file_handler file_handler = { + .process_sample_event = process_sample_event, + .process_mmap_event = process_mmap_event, + .process_comm_event = process_comm_event, + .process_exit_event = process_task_event, + .process_fork_event = process_task_event, + .process_lost_event = process_lost_event, + .process_read_event = process_read_event, + .sample_type_check = sample_type_check, +}; - if (unlikely(head & 7)) - head &= ~7ULL; - size = 8; - } +static int __cmd_report(void) +{ + struct thread *idle; + int ret; - head += size; + idle = register_idle_thread(&threads, &last_match); + thread__comm_adjust(idle); - if (offset + head >= header->data_offset + header->data_size) - goto done; + if (show_threads) + perf_read_values_init(&show_threads_values); - if (offset + head < (unsigned long)input_stat.st_size) - goto more; + register_perf_file_handler(&file_handler); -done: - rc = EXIT_SUCCESS; - close(input); + ret = mmap_dispatch_perf_file(&header, input_name, force, full_paths, + &cwdlen, &cwd); + if (ret) + return ret; dump_printf(" IP events: %10ld\n", total); dump_printf(" mmap events: %10ld\n", total_mmap); dump_printf(" comm events: %10ld\n", total_comm); dump_printf(" fork events: %10ld\n", total_fork); dump_printf(" lost events: %10ld\n", total_lost); - dump_printf(" unknown events: %10ld\n", total_unknown); + dump_printf(" unknown events: %10ld\n", file_handler.total_unknown); if (dump_trace) return 0; @@ -1034,7 +893,7 @@ done: if (show_threads) perf_read_values_destroy(&show_threads_values); - return rc; + return ret; } static int @@ -1177,8 +1036,6 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) { symbol__init(); - page_size = getpagesize(); - argc = parse_options(argc, argv, options, report_usage, 0); setup_sorting(); diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 18871380b015..e1df7055ab82 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -11,6 +11,7 @@ #include "util/trace-event.h" #include "util/debug.h" +#include "util/data_map.h" #include #include @@ -20,9 +21,6 @@ #include static char const *input_name = "perf.data"; -static int input; -static unsigned long page_size; -static unsigned long mmap_window = 32; static unsigned long total_comm = 0; @@ -35,6 +33,9 @@ static u64 sample_type; static char default_sort_order[] = "avg, max, switch, runtime"; static char *sort_order = default_sort_order; +static char *cwd; +static int cwdlen; + #define PR_SET_NAME 15 /* Set process name */ #define MAX_CPUS 4096 @@ -1594,129 +1595,43 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) } static int -process_event(event_t *event, unsigned long offset, unsigned long head) +process_lost_event(event_t *event __used, + unsigned long offset __used, + unsigned long head __used) { - trace_event(event); - - nr_events++; - switch (event->header.type) { - case PERF_RECORD_MMAP: - return 0; - case PERF_RECORD_LOST: - nr_lost_chunks++; - nr_lost_events += event->lost.lost; - return 0; - - case PERF_RECORD_COMM: - return process_comm_event(event, offset, head); + nr_lost_chunks++; + nr_lost_events += event->lost.lost; - case PERF_RECORD_EXIT ... PERF_RECORD_READ: - return 0; + return 0; +} - case PERF_RECORD_SAMPLE: - return process_sample_event(event, offset, head); +static int sample_type_check(u64 type) +{ + sample_type = type; - case PERF_RECORD_MAX: - default: + if (!(sample_type & PERF_SAMPLE_RAW)) { + fprintf(stderr, + "No trace sample to read. 
Did you call perf record " + "without -R?"); return -1; } return 0; } +static struct perf_file_handler file_handler = { + .process_sample_event = process_sample_event, + .process_comm_event = process_comm_event, + .process_lost_event = process_lost_event, + .sample_type_check = sample_type_check, +}; + static int read_events(void) { - int ret, rc = EXIT_FAILURE; - unsigned long offset = 0; - unsigned long head = 0; - struct stat perf_stat; - event_t *event; - uint32_t size; - char *buf; - register_idle_thread(&threads, &last_match); + register_perf_file_handler(&file_handler); - input = open(input_name, O_RDONLY); - if (input < 0) { - perror("failed to open file"); - exit(-1); - } - - ret = fstat(input, &perf_stat); - if (ret < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!perf_stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - header = perf_header__read(input); - head = header->data_offset; - sample_type = perf_header__sample_type(header); - - if (!(sample_type & PERF_SAMPLE_RAW)) - die("No trace sample to read. Did you call perf record " - "without -R?"); - - if (load_kernel() < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; - } - -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - size = event->header.size; - if (!size) - size = 8; - - if (head + event->header.size >= page_size * mmap_window) { - unsigned long shift = page_size * (head / page_size); - int res; - - res = munmap(buf, page_size * mmap_window); - assert(res == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - - - if (!size || process_event(event, offset, head) < 0) { - - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. 
- */ - - if (unlikely(head & 7)) - head &= ~7ULL; - - size = 8; - } - - head += size; - - if (offset + head < (unsigned long)perf_stat.st_size) - goto more; - - rc = EXIT_SUCCESS; - close(input); - - return rc; + return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd); } static void print_bad_events(void) @@ -1934,7 +1849,6 @@ static int __cmd_record(int argc, const char **argv) int cmd_sched(int argc, const char **argv, const char *prefix __used) { symbol__init(); - page_size = getpagesize(); argc = parse_options(argc, argv, sched_options, sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index d9abb4ae5f79..fb3f3c220211 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -12,11 +12,9 @@ #include "util/debug.h" #include "util/trace-event.h" +#include "util/data_map.h" static char const *input_name = "perf.data"; -static int input; -static unsigned long page_size; -static unsigned long mmap_window = 32; static unsigned long total = 0; static unsigned long total_comm = 0; @@ -27,6 +25,9 @@ static struct thread *last_match; static struct perf_header *header; static u64 sample_type; +static char *cwd; +static int cwdlen; + static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) @@ -112,125 +113,32 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return 0; } -static int -process_event(event_t *event, unsigned long offset, unsigned long head) +static int sample_type_check(u64 type) { - trace_event(event); - - switch (event->header.type) { - case PERF_RECORD_MMAP ... PERF_RECORD_LOST: - return 0; - - case PERF_RECORD_COMM: - return process_comm_event(event, offset, head); - - case PERF_RECORD_EXIT ... PERF_RECORD_READ: - return 0; + sample_type = type; - case PERF_RECORD_SAMPLE: - return process_sample_event(event, offset, head); - - case PERF_RECORD_MAX: - default: + if (!(sample_type & PERF_SAMPLE_RAW)) { + fprintf(stderr, + "No trace sample to read. Did you call perf record " + "without -R?"); return -1; } return 0; } +static struct perf_file_handler file_handler = { + .process_sample_event = process_sample_event, + .process_comm_event = process_comm_event, + .sample_type_check = sample_type_check, +}; + static int __cmd_trace(void) { - int ret, rc = EXIT_FAILURE; - unsigned long offset = 0; - unsigned long head = 0; - unsigned long shift; - struct stat perf_stat; - event_t *event; - uint32_t size; - char *buf; - register_idle_thread(&threads, &last_match); + register_perf_file_handler(&file_handler); - input = open(input_name, O_RDONLY); - if (input < 0) { - perror("failed to open file"); - exit(-1); - } - - ret = fstat(input, &perf_stat); - if (ret < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!perf_stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - header = perf_header__read(input); - head = header->data_offset; - sample_type = perf_header__sample_type(header); - - if (!(sample_type & PERF_SAMPLE_RAW)) - die("No trace sample to read. 
Did you call perf record " - "without -R?"); - - if (load_kernel() < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; - } - - shift = page_size * (head / page_size); - offset += shift; - head -= shift; - -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - if (head + event->header.size >= page_size * mmap_window) { - int res; - - shift = page_size * (head / page_size); - res = munmap(buf, page_size * mmap_window); - assert(res == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - - if (!size || process_event(event, offset, head) < 0) { - - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. - */ - - if (unlikely(head & 7)) - head &= ~7ULL; - - size = 8; - } - - head += size; - - if (offset + head < (unsigned long)perf_stat.st_size) - goto more; - - rc = EXIT_SUCCESS; - close(input); - - return rc; + return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd); } static const char * const annotate_usage[] = { @@ -249,7 +157,6 @@ static const struct option options[] = { int cmd_trace(int argc, const char **argv, const char *prefix __used) { symbol__init(); - page_size = getpagesize(); argc = parse_options(argc, argv, options, annotate_usage, 0); if (argc) { diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c new file mode 100644 index 000000000000..242b0555ab91 --- /dev/null +++ b/tools/perf/util/data_map.c @@ -0,0 +1,222 @@ +#include "data_map.h" +#include "symbol.h" +#include "util.h" +#include "debug.h" + + +static struct perf_file_handler *curr_handler; +static unsigned long mmap_window = 32; +static char __cwd[PATH_MAX]; + +static int +process_event_stub(event_t *event __used, + unsigned long offset __used, + unsigned long head __used) +{ + return 0; +} + +void register_perf_file_handler(struct perf_file_handler *handler) +{ + if (!handler->process_sample_event) + handler->process_sample_event = process_event_stub; + if (!handler->process_mmap_event) + handler->process_mmap_event = process_event_stub; + if (!handler->process_comm_event) + handler->process_comm_event = process_event_stub; + if (!handler->process_fork_event) + handler->process_fork_event = process_event_stub; + if (!handler->process_exit_event) + handler->process_exit_event = process_event_stub; + if (!handler->process_lost_event) + handler->process_lost_event = process_event_stub; + if (!handler->process_read_event) + handler->process_read_event = process_event_stub; + if (!handler->process_throttle_event) + handler->process_throttle_event = process_event_stub; + if (!handler->process_unthrottle_event) + handler->process_unthrottle_event = process_event_stub; + + curr_handler = handler; +} + +static int +process_event(event_t *event, unsigned long offset, unsigned long head) +{ + trace_event(event); + + switch (event->header.type) { + case PERF_RECORD_SAMPLE: + return curr_handler->process_sample_event(event, offset, head); + case PERF_RECORD_MMAP: + return curr_handler->process_mmap_event(event, offset, head); + case PERF_RECORD_COMM: + return curr_handler->process_comm_event(event, offset, head); + case PERF_RECORD_FORK: + return curr_handler->process_fork_event(event, offset, head); + case PERF_RECORD_EXIT: + return curr_handler->process_exit_event(event, offset, head); + case 
PERF_RECORD_LOST: + return curr_handler->process_lost_event(event, offset, head); + case PERF_RECORD_READ: + return curr_handler->process_read_event(event, offset, head); + case PERF_RECORD_THROTTLE: + return curr_handler->process_throttle_event(event, offset, head); + case PERF_RECORD_UNTHROTTLE: + return curr_handler->process_unthrottle_event(event, offset, head); + default: + curr_handler->total_unknown++; + return -1; + } +} + +int mmap_dispatch_perf_file(struct perf_header **pheader, + const char *input_name, + int force, + int full_paths, + int *cwdlen, + char **cwd) +{ + int ret, rc = EXIT_FAILURE; + struct perf_header *header; + unsigned long head, shift; + unsigned long offset = 0; + struct stat input_stat; + size_t page_size; + u64 sample_type; + event_t *event; + uint32_t size; + int input; + char *buf; + + if (!curr_handler) + die("Forgot to register perf file handler"); + + page_size = getpagesize(); + + input = open(input_name, O_RDONLY); + if (input < 0) { + fprintf(stderr, " failed to open file: %s", input_name); + if (!strcmp(input_name, "perf.data")) + fprintf(stderr, " (try 'perf record' first)"); + fprintf(stderr, "\n"); + exit(-1); + } + + ret = fstat(input, &input_stat); + if (ret < 0) { + perror("failed to stat file"); + exit(-1); + } + + if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { + fprintf(stderr, "file: %s not owned by current user or root\n", + input_name); + exit(-1); + } + + if (!input_stat.st_size) { + fprintf(stderr, "zero-sized file, nothing to do!\n"); + exit(0); + } + + *pheader = perf_header__read(input); + header = *pheader; + head = header->data_offset; + + sample_type = perf_header__sample_type(header); + + if (curr_handler->sample_type_check) + if (curr_handler->sample_type_check(sample_type) < 0) + exit(-1); + + if (load_kernel() < 0) { + perror("failed to load kernel symbols"); + return EXIT_FAILURE; + } + + if (!full_paths) { + if (getcwd(__cwd, sizeof(__cwd)) == NULL) { + perror("failed to get the current directory"); + return EXIT_FAILURE; + } + *cwd = __cwd; + *cwdlen = strlen(*cwd); + } else { + *cwd = NULL; + *cwdlen = 0; + } + + shift = page_size * (head / page_size); + offset += shift; + head -= shift; + +remap: + buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, + MAP_SHARED, input, offset); + if (buf == MAP_FAILED) { + perror("failed to mmap file"); + exit(-1); + } + +more: + event = (event_t *)(buf + head); + + size = event->header.size; + if (!size) + size = 8; + + if (head + event->header.size >= page_size * mmap_window) { + int munmap_ret; + + shift = page_size * (head / page_size); + + munmap_ret = munmap(buf, page_size * mmap_window); + assert(munmap_ret == 0); + + offset += shift; + head -= shift; + goto remap; + } + + size = event->header.size; + + dump_printf("\n%p [%p]: event: %d\n", + (void *)(offset + head), + (void *)(long)event->header.size, + event->header.type); + + if (!size || process_event(event, offset, head) < 0) { + + dump_printf("%p [%p]: skipping unknown header type: %d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.type); + + /* + * assume we lost track of the stream, check alignment, and + * increment a single u64 in the hope to catch on again 'soon'. 
+ */ + + if (unlikely(head & 7)) + head &= ~7ULL; + + size = 8; + } + + head += size; + + if (offset + head >= header->data_offset + header->data_size) + goto done; + + if (offset + head < (unsigned long)input_stat.st_size) + goto more; + +done: + rc = EXIT_SUCCESS; + close(input); + + return rc; +} + + diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h new file mode 100644 index 000000000000..716d1053b074 --- /dev/null +++ b/tools/perf/util/data_map.h @@ -0,0 +1,31 @@ +#ifndef __PERF_DATAMAP_H +#define __PERF_DATAMAP_H + +#include "event.h" +#include "header.h" + +typedef int (*event_type_handler_t)(event_t *, unsigned long, unsigned long); + +struct perf_file_handler { + event_type_handler_t process_sample_event; + event_type_handler_t process_mmap_event; + event_type_handler_t process_comm_event; + event_type_handler_t process_fork_event; + event_type_handler_t process_exit_event; + event_type_handler_t process_lost_event; + event_type_handler_t process_read_event; + event_type_handler_t process_throttle_event; + event_type_handler_t process_unthrottle_event; + int (*sample_type_check)(u64 sample_type); + unsigned long total_unknown; +}; + +void register_perf_file_handler(struct perf_file_handler *handler); +int mmap_dispatch_perf_file(struct perf_header **pheader, + const char *input_name, + int force, + int full_paths, + int *cwdlen, + char **cwd); + +#endif -- cgit v1.2.3 From 9a92b479b2f088ee2d3194243f4c8e59b1b8c9c2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 8 Oct 2009 16:37:12 +0200 Subject: perf tools: Improve thread comm resolution in perf sched When we get sched traces that involve a task that was already created before opening the event, we won't have the comm event for it. So if we can't find the comm event for a given thread, we look at the traces that may contain these informations. 
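A minimal sketch of that fallback (simplified, not the patch itself): if no COMM event was recorded for a pid, take the task name carried inside the sched event payload rather than keeping the ":<pid>" placeholder; the before/after listings below show the effect on the reported task names.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Cut-down stand-in for perf's struct thread: just pid and comm. */
struct thread {
	int pid;
	char *comm;	/* NULL until a name is known */
};

/* Hypothetical fallback: only fill in the name if we never saw a
 * COMM event for this thread. */
static void thread__comm_from_trace(struct thread *th, const char *trace_comm)
{
	if (th->comm == NULL && trace_comm != NULL)
		th->comm = strdup(trace_comm);
}

int main(void)
{
	struct thread th = { .pid = 5124, .comm = NULL };

	thread__comm_from_trace(&th, "firefox");
	printf("%s:%d\n", th.comm ? th.comm : "?", th.pid);
	free(th.comm);
	return 0;
}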
Before: ata/1:371 | 0.000 ms | 1 | avg: 3988.693 ms | max: 3988.693 ms | kondemand/1:421 | 0.096 ms | 3 | avg: 345.346 ms | max: 1035.989 ms | kondemand/0:420 | 0.025 ms | 3 | avg: 421.332 ms | max: 964.014 ms | :5124:5124 | 0.103 ms | 5 | avg: 74.082 ms | max: 277.194 ms | :6244:6244 | 0.691 ms | 9 | avg: 125.655 ms | max: 271.306 ms | firefox:5080 | 0.924 ms | 5 | avg: 53.833 ms | max: 257.828 ms | npviewer.bin:6225 | 21.871 ms | 53 | avg: 22.462 ms | max: 220.835 ms | :6245:6245 | 9.631 ms | 21 | avg: 41.864 ms | max: 213.349 ms | After: ata/1:371 | 0.000 ms | 1 | avg: 3988.693 ms | max: 3988.693 ms | kondemand/1:421 | 0.096 ms | 3 | avg: 345.346 ms | max: 1035.989 ms | kondemand/0:420 | 0.025 ms | 3 | avg: 421.332 ms | max: 964.014 ms | firefox:5124 | 0.103 ms | 5 | avg: 74.082 ms | max: 277.194 ms | npviewer.bin:6244 | 0.691 ms | 9 | avg: 125.655 ms | max: 271.306 ms | firefox:5080 | 0.924 ms | 5 | avg: 53.833 ms | max: 257.828 ms | npviewer.bin:6225 | 21.871 ms | 53 | avg: 22.462 ms | max: 220.835 ms | npviewer.bin:6245 | 9.631 ms | 21 | avg: 41.864 ms | max: 213.349 ms | Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras LKML-Reference: <1255012632-7882-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-sched.c | 44 +++++++++++++++++++++++++++++++++++++++----- tools/perf/util/thread.c | 32 +++++++++++++++++++++++++------- tools/perf/util/thread.h | 3 +++ 3 files changed, 67 insertions(+), 12 deletions(-) diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index e1df7055ab82..25b91e784332 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1034,6 +1034,36 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp) atoms->nb_atoms++; } +static struct thread * +threads__findnew_from_ctx(u32 pid, struct trace_switch_event *switch_event) +{ + struct thread *th; + + th = threads__findnew_nocomm(pid, &threads, &last_match); + if (th->comm) + return th; + + if (pid == switch_event->prev_pid) + thread__set_comm(th, switch_event->prev_comm); + else + thread__set_comm(th, switch_event->next_comm); + return th; +} + +static struct thread * +threads__findnew_from_wakeup(struct trace_wakeup_event *wakeup_event) +{ + struct thread *th; + + th = threads__findnew_nocomm(wakeup_event->pid, &threads, &last_match); + if (th->comm) + return th; + + thread__set_comm(th, wakeup_event->comm); + + return th; +} + static void latency_switch_event(struct trace_switch_event *switch_event, struct event *event __used, @@ -1059,8 +1089,10 @@ latency_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %Ld < 0 ?\n", delta); - sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); - sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); + sched_out = threads__findnew_from_ctx(switch_event->prev_pid, + switch_event); + sched_in = threads__findnew_from_ctx(switch_event->next_pid, + switch_event); out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); if (!out_events) { @@ -1126,7 +1158,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, if (!wakeup_event->success) return; - wakee = threads__findnew(wakeup_event->pid, &threads, &last_match); + wakee = threads__findnew_from_wakeup(wakeup_event); atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); if (!atoms) { thread_atoms_insert(wakee); @@ -1386,8 +1418,10 @@ map_switch_event(struct trace_switch_event *switch_event, die("hm, 
delta: %Ld < 0 ?\n", delta); - sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); - sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); + sched_out = threads__findnew_from_ctx(switch_event->prev_pid, + switch_event); + sched_in = threads__findnew_from_ctx(switch_event->next_pid, + switch_event); curr_thread[this_cpu] = sched_in; diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 3b56aebb1f4b..8bd5ca2d2f28 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -6,15 +6,17 @@ #include "util.h" #include "debug.h" -static struct thread *thread__new(pid_t pid) +static struct thread *thread__new(pid_t pid, int set_comm) { struct thread *self = calloc(1, sizeof(*self)); if (self != NULL) { self->pid = pid; - self->comm = malloc(32); - if (self->comm) - snprintf(self->comm, 32, ":%d", self->pid); + if (set_comm) { + self->comm = malloc(32); + if (self->comm) + snprintf(self->comm, 32, ":%d", self->pid); + } self->maps = RB_ROOT; INIT_LIST_HEAD(&self->removed_maps); } @@ -50,8 +52,10 @@ static size_t thread__fprintf(struct thread *self, FILE *fp) return ret; } -struct thread * -threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) +static struct thread * +__threads__findnew(pid_t pid, struct rb_root *threads, + struct thread **last_match, + int set_comm) { struct rb_node **p = &threads->rb_node; struct rb_node *parent = NULL; @@ -80,7 +84,8 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) p = &(*p)->rb_right; } - th = thread__new(pid); + th = thread__new(pid, set_comm); + if (th != NULL) { rb_link_node(&th->rb_node, parent, p); rb_insert_color(&th->rb_node, threads); @@ -90,6 +95,19 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) return th; } +struct thread * +threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) +{ + return __threads__findnew(pid, threads, last_match, 1); +} + +struct thread * +threads__findnew_nocomm(pid_t pid, struct rb_root *threads, + struct thread **last_match) +{ + return __threads__findnew(pid, threads, last_match, 0); +} + struct thread * register_idle_thread(struct rb_root *threads, struct thread **last_match) { diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 845d9b62f96f..75bc843950c4 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -18,6 +18,9 @@ int thread__set_comm(struct thread *self, const char *comm); struct thread * threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match); struct thread * +threads__findnew_nocomm(pid_t pid, struct rb_root *threads, + struct thread **last_match); +struct thread * register_idle_thread(struct rb_root *threads, struct thread **last_match); void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); -- cgit v1.2.3 From da21d1b547cbaa2c026cf645753651c25d340923 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 7 Oct 2009 10:49:00 -0300 Subject: perf tools: Up the verbose level for some really verbose stuff Like printing every symbol created. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1254923340-4870-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 4 ++-- tools/perf/builtin-report.c | 4 ++-- tools/perf/util/symbol.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 35ed97bd0c63..8c84320ecb06 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -658,10 +658,10 @@ more: if (dump_trace) return 0; - if (verbose >= 3) + if (verbose > 3) threads__fprintf(stdout, &threads); - if (verbose >= 2) + if (verbose > 2) dsos__fprintf(stdout); collapse__resort(); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 87c4582303bf..f57a23b19f3c 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -880,10 +880,10 @@ static int __cmd_report(void) if (dump_trace) return 0; - if (verbose >= 3) + if (verbose > 3) threads__fprintf(stdout, &threads); - if (verbose >= 2) + if (verbose > 2) dsos__fprintf(stdout); collapse__resort(); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 582ce72ca4d2..a6887f94dfe7 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -74,7 +74,7 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name, if (!self) return NULL; - if (v >= 2) + if (v > 2) printf("new symbol: %016Lx [%08lx]: %s, hist: %p\n", start, (unsigned long)len, name, self->hist); @@ -685,7 +685,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, } if (self->adjust_symbols) { - if (v >= 2) + if (v > 2) printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n", (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); -- cgit v1.2.3 From 2e538c4a1847291cf01218d4fe7bb4dc60fef7cf Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 7 Oct 2009 13:48:56 -0300 Subject: perf tools: Improve kernel/modules symbol lookup This removes the ovelapping of vmlinux addresses with modules, using the ELF section name when using --vmlinux and creating a unique DSO name when using /proc/kallsyms ([kernel].N). This is done by creating multiple 'struct map' instances for address ranges backed by DSOs that have just the symbols for that range and a name that is derived from the ELF section name.o Now it is possible to ask for just the symbols in some particular kernel section: $ perf report -m --vmlinux ../build/tip-recvmmsg/vmlinux \ --dsos [kernel].vsyscall_fn | head -15 52.73% Xorg [.] vread_hpet 18.61% firefox [.] vread_hpet 14.50% npviewer.bin [.] vread_hpet 6.83% compiz [.] vread_hpet 5.73% glxgears [.] vread_hpet 0.63% java [.] vread_hpet 0.30% gnome-terminal [.] vread_hpet 0.23% perf [.] vread_hpet 0.18% xchat [.] vread_hpet $ Now we don't have to first lookup the list of modules and then, if it fails, vmlinux symbols, its just a simple lookup for the map then the symbols, just like for threads. Reports generated using /proc/kallsyms and --vmlinux should provide the same results, modulo the DSO name for sections other than ".text". But they don't right now because things like: ffffffff81011c20-ffffffff81012068 system_call ffffffff81011c30-ffffffff81011c9b system_call_after_swapgs ffffffff81011c9c-ffffffff81011cb6 system_call_fastpath ffffffff81011cb7-ffffffff81011cbb ret_from_sys_call I.e. 
overlapping symbols, again some ASM special case that we have to fixup. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1254934136-8503-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 288 +++++++++++++++++++++++++++++++++-------------- 1 file changed, 201 insertions(+), 87 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index a6887f94dfe7..faa84f5d4f54 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -26,27 +26,35 @@ enum dso_origin { static void dsos__add(struct dso *dso); static struct dso *dsos__find(const char *name); +static struct map *map__new2(u64 start, struct dso *dso); +static void kernel_maps__insert(struct map *map); static struct rb_root kernel_maps; -static void dso__set_symbols_end(struct dso *self) +static void dso__fixup_sym_end(struct dso *self) { struct rb_node *nd, *prevnd = rb_first(&self->syms); + struct symbol *curr, *prev; if (prevnd == NULL) return; + curr = rb_entry(prevnd, struct symbol, rb_node); + for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { - struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node), - *curr = rb_entry(nd, struct symbol, rb_node); + prev = curr; + curr = rb_entry(nd, struct symbol, rb_node); if (prev->end == prev->start) prev->end = curr->start - 1; - prevnd = nd; } + + /* Last entry */ + if (curr->end == curr->start) + curr->end = roundup(curr->start, 4096); } -static void kernel_maps__fixup_sym_end(void) +static void kernel_maps__fixup_end(void) { struct map *prev, *curr; struct rb_node *nd, *prevnd = rb_first(&kernel_maps); @@ -55,13 +63,17 @@ static void kernel_maps__fixup_sym_end(void) return; curr = rb_entry(prevnd, struct map, rb_node); - dso__set_symbols_end(curr->dso); for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { prev = curr; curr = rb_entry(nd, struct map, rb_node); prev->end = curr->start - 1; - dso__set_symbols_end(curr->dso); + } + + nd = rb_last(&curr->dso->syms); + if (nd) { + struct symbol *sym = rb_entry(nd, struct symbol, rb_node); + curr->end = sym->end; } } @@ -200,13 +212,16 @@ size_t dso__fprintf(struct dso *self, FILE *fp) return ret; } -static int maps__load_kallsyms(symbol_filter_t filter, int use_modules, int v) +/* + * Loads the function entries in /proc/kallsyms into kernel_map->dso, + * so that we can in the next step set the symbol ->end address and then + * call kernel_maps__split_kallsyms. + */ +static int kernel_maps__load_all_kallsyms(int v) { - struct map *map = kernel_map; char *line = NULL; size_t n; FILE *file = fopen("/proc/kallsyms", "r"); - int count = 0; if (file == NULL) goto out_failure; @@ -216,7 +231,7 @@ static int maps__load_kallsyms(symbol_filter_t filter, int use_modules, int v) struct symbol *sym; int line_len, len; char symbol_type; - char *module, *symbol_name; + char *symbol_name; line_len = getline(&line, &n, file); if (line_len < 0) @@ -241,20 +256,55 @@ static int maps__load_kallsyms(symbol_filter_t filter, int use_modules, int v) continue; symbol_name = line + len + 2; - module = strchr(symbol_name, '\t'); - if (module) { - char *module_name_end; + /* + * Will fix up the end later, when we have all symbols sorted. 
+ */ + sym = symbol__new(start, 0, symbol_name, + kernel_map->dso->sym_priv_size, v); + if (sym == NULL) + goto out_delete_line; + + dso__insert_symbol(kernel_map->dso, sym); + } + + free(line); + fclose(file); + + return 0; + +out_delete_line: + free(line); +out_failure: + return -1; +} + +/* + * Split the symbols into maps, making sure there are no overlaps, i.e. the + * kernel range is broken in several maps, named [kernel].N, as we don't have + * the original ELF section names vmlinux have. + */ +static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules) +{ + struct map *map = kernel_map; + struct symbol *pos; + int count = 0; + struct rb_node *next = rb_first(&kernel_map->dso->syms); + int kernel_range = 0; + + while (next) { + char *module; + + pos = rb_entry(next, struct symbol, rb_node); + next = rb_next(&pos->rb_node); + + module = strchr(pos->name, '\t'); + if (module) { if (!use_modules) - continue; - *module = '\0'; - module = strchr(module + 1, '['); - if (!module) - continue; - module_name_end = strchr(module + 1, ']'); - if (!module_name_end) - continue; - *(module_name_end + 1) = '\0'; + goto delete_symbol; + + *module++ = '\0'; + if (strcmp(map->dso->name, module)) { map = kernel_maps__find_by_dso_name(module); if (!map) { @@ -263,50 +313,77 @@ static int maps__load_kallsyms(symbol_filter_t filter, int use_modules, int v) return -1; } } - start = map->map_ip(map, start); - } else - map = kernel_map; - /* - * Well fix up the end later, when we have all sorted. - */ - sym = symbol__new(start, 0, symbol_name, - map->dso->sym_priv_size, v); + /* + * So that we look just like we get from .ko files, + * i.e. not prelinked, relative to map->start. + */ + pos->start = map->map_ip(map, pos->start); + pos->end = map->map_ip(map, pos->end); + } else if (map != kernel_map) { + char dso_name[PATH_MAX]; + struct dso *dso; + + snprintf(dso_name, sizeof(dso_name), "[kernel].%d", + kernel_range++); + + dso = dso__new(dso_name, + kernel_map->dso->sym_priv_size); + if (dso == NULL) + return -1; + + map = map__new2(pos->start, dso); + if (map == NULL) { + dso__delete(dso); + return -1; + } - if (sym == NULL) - goto out_delete_line; + map->map_ip = vdso__map_ip; + kernel_maps__insert(map); + ++kernel_range; + } - if (filter && filter(map, sym)) - symbol__delete(sym, map->dso->sym_priv_size); - else { - dso__insert_symbol(map->dso, sym); + if (filter && filter(map, pos)) { +delete_symbol: + rb_erase(&pos->rb_node, &kernel_map->dso->syms); + symbol__delete(pos, kernel_map->dso->sym_priv_size); + } else { + if (map != kernel_map) { + rb_erase(&pos->rb_node, &kernel_map->dso->syms); + dso__insert_symbol(map->dso, pos); + } count++; } } - free(line); - fclose(file); - return count; +} -out_delete_line: - free(line); -out_failure: - return -1; + +static int kernel_maps__load_kallsyms(symbol_filter_t filter, + int use_modules, int v) +{ + if (kernel_maps__load_all_kallsyms(v)) + return -1; + + dso__fixup_sym_end(kernel_map->dso); + + return kernel_maps__split_kallsyms(filter, use_modules); } -static size_t kernel_maps__fprintf(FILE *fp) +static size_t kernel_maps__fprintf(FILE *fp, int v) { size_t printed = fprintf(stderr, "Kernel maps:\n"); struct rb_node *nd; - printed += map__fprintf(kernel_map, fp); - printed += dso__fprintf(kernel_map->dso, fp); - for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) { struct map *pos = rb_entry(nd, struct map, rb_node); + printed += fprintf(fp, "Map:"); printed += map__fprintf(pos, fp); - printed += dso__fprintf(pos->dso, fp); + if (v > 
1) { + printed += dso__fprintf(pos->dso, fp); + printed += fprintf(fp, "--\n"); + } } return printed + fprintf(stderr, "END kernel maps\n"); @@ -594,6 +671,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, int fd, symbol_filter_t filter, int kernel, int kmodule, int v) { + struct map *curr_map = map; + struct dso *curr_dso = self; + size_t dso_name_len = strlen(self->short_name); Elf_Data *symstrs, *secstrs; uint32_t nr_syms; int err = -1; @@ -660,10 +740,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { struct symbol *f; const char *elf_name; - char *demangled; + char *demangled = NULL; int is_label = elf_sym__is_label(&sym); const char *section_name; - u64 sh_offset = 0; if (!is_label && !elf_sym__is_function(&sym)) continue; @@ -677,14 +756,51 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, if (is_label && !elf_sec__is_text(&shdr, secstrs)) continue; + elf_name = elf_sym__name(&sym, symstrs); section_name = elf_sec__name(&shdr, secstrs); - if ((kernel || kmodule)) { - if (strstr(section_name, ".init")) - sh_offset = shdr.sh_offset; + if (kernel || kmodule) { + char dso_name[PATH_MAX]; + + if (strcmp(section_name, + curr_dso->short_name + dso_name_len) == 0) + goto new_symbol; + + if (strcmp(section_name, ".text") == 0) { + curr_map = map; + curr_dso = self; + goto new_symbol; + } + + snprintf(dso_name, sizeof(dso_name), + "%s%s", self->short_name, section_name); + + curr_map = kernel_maps__find_by_dso_name(dso_name); + if (curr_map == NULL) { + u64 start = sym.st_value; + + if (kmodule) + start += map->start + shdr.sh_offset; + + curr_dso = dso__new(dso_name, self->sym_priv_size); + if (curr_dso == NULL) + goto out_elf_end; + curr_map = map__new2(start, curr_dso); + if (curr_map == NULL) { + dso__delete(curr_dso); + goto out_elf_end; + } + curr_map->map_ip = vdso__map_ip; + curr_dso->origin = DSO__ORIG_KERNEL; + kernel_maps__insert(curr_map); + dsos__add(curr_dso); + } else + curr_dso = curr_map->dso; + + goto new_symbol; } - if (self->adjust_symbols) { + if (curr_dso->adjust_symbols) { if (v > 2) printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n", (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); @@ -696,25 +812,29 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, * DWARF DW_compile_unit has this, but we don't always have access * to it... */ - elf_name = elf_sym__name(&sym, symstrs); demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI); if (demangled != NULL) elf_name = demangled; - - f = symbol__new(sym.st_value + sh_offset, sym.st_size, elf_name, - self->sym_priv_size, v); +new_symbol: + f = symbol__new(sym.st_value, sym.st_size, elf_name, + curr_dso->sym_priv_size, v); free(demangled); if (!f) goto out_elf_end; - if (filter && filter(map, f)) - symbol__delete(f, self->sym_priv_size); + if (filter && filter(curr_map, f)) + symbol__delete(f, curr_dso->sym_priv_size); else { - dso__insert_symbol(self, f); + dso__insert_symbol(curr_dso, f); nr++; } } + /* + * For misannotated, zeroed, ASM function sizes. + */ + if (nr > 0) + dso__fixup_sym_end(self); err = nr; out_elf_end: elf_end(elf); @@ -883,27 +1003,17 @@ static void kernel_maps__insert(struct map *map) struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp) { - /* - * We can't have kernel_map in kernel_maps because it spans an address - * space that includes the modules. 
The right way to fix this is to - * create several maps, so that we don't have overlapping ranges with - * modules. For now lets look first on the kernel dso. - */ struct map *map = maps__find(&kernel_maps, ip); - struct symbol *sym; + + if (mapp) + *mapp = map; if (map) { ip = map->map_ip(map, ip); - sym = map->dso->find_symbol(map->dso, ip); - } else { - map = kernel_map; - sym = map->dso->find_symbol(map->dso, ip); + return map->dso->find_symbol(map->dso, ip); } - if (mapp) - *mapp = map; - - return sym; + return NULL; } struct map *kernel_maps__find_by_dso_name(const char *name) @@ -994,6 +1104,14 @@ static int dsos__load_modules_sym_dir(char *dirname, last = rb_last(&map->dso->syms); if (last) { struct symbol *sym; + /* + * We do this here as well, even having the + * symbol size found in the symtab because + * misannotated ASM symbols may have the size + * set to zero. + */ + dso__fixup_sym_end(map->dso); + sym = rb_entry(last, struct symbol, rb_node); map->end = map->start + sym->end; } @@ -1163,17 +1281,11 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, } if (err <= 0) - err = maps__load_kallsyms(filter, use_modules, v); + err = kernel_maps__load_kallsyms(filter, use_modules, v); if (err > 0) { struct rb_node *node = rb_first(&dso->syms); struct symbol *sym = rb_entry(node, struct symbol, rb_node); - /* - * Now that we have all sorted out, just set the ->end of all - * symbols that still don't have it. - */ - dso__set_symbols_end(dso); - kernel_maps__fixup_sym_end(); kernel_map->start = sym->start; node = rb_last(&dso->syms); @@ -1181,14 +1293,16 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, kernel_map->end = sym->end; dso->origin = DSO__ORIG_KERNEL; + kernel_maps__insert(kernel_map); /* - * XXX See kernel_maps__find_symbol comment - * kernel_maps__insert(kernel_map) + * Now that we have all sorted out, just set the ->end of all + * maps: */ + kernel_maps__fixup_end(); dsos__add(dso); if (v > 0) - kernel_maps__fprintf(stderr); + kernel_maps__fprintf(stderr, v); } return err; -- cgit v1.2.3 From 97ea1a7fa62af0d8d49a0fc12796b0073537c9d8 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 8 Oct 2009 21:04:17 +0200 Subject: perf tools: Fix thread comm resolution in perf sched This reverts commit 9a92b479b2f088ee2d3194243f4c8e59b1b8c9c2 ("perf tools: Improve thread comm resolution in perf sched") and fixes the real bug. The bug was elsewhere: We are failing to resolve thread names in perf sched because the table of threads we are building, on top of comm events, has a per process granularity. But perf sched, unlike the other perf tools, needs a per thread granularity as we are profiling every tasks individually. So fix it by building our threads table using the tid instead of the pid as the thread identifier. 
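With perf comm events, event->comm.pid is the process (tgid) while event->comm.tid identifies the individual task, so a pid-keyed table keeps a single entry per process and the last comm event wins. A rough sketch of the difference (simplified, same call as in the hunk below):

	/* per-process: all threads of a process share one entry,
	 * later comm events overwrite the earlier comm */
	thread = threads__findnew(event->comm.pid, &threads, &last_match);

	/* per-thread: one entry per task, each keeps its own comm */
	thread = threads__findnew(event->comm.tid, &threads, &last_match);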
v2: Revert the previous fix - it is not really needed Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras LKML-Reference: <1255028657-11158-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-sched.c | 46 ++++++---------------------------------------- tools/perf/util/thread.c | 32 +++++++------------------------- tools/perf/util/thread.h | 3 --- 3 files changed, 13 insertions(+), 68 deletions(-) diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 25b91e784332..6b00529ce348 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -638,7 +638,7 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) { struct thread *thread; - thread = threads__findnew(event->comm.pid, &threads, &last_match); + thread = threads__findnew(event->comm.tid, &threads, &last_match); dump_printf("%p [%p]: perf_event_comm: %s:%d\n", (void *)(offset + head), @@ -1034,36 +1034,6 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp) atoms->nb_atoms++; } -static struct thread * -threads__findnew_from_ctx(u32 pid, struct trace_switch_event *switch_event) -{ - struct thread *th; - - th = threads__findnew_nocomm(pid, &threads, &last_match); - if (th->comm) - return th; - - if (pid == switch_event->prev_pid) - thread__set_comm(th, switch_event->prev_comm); - else - thread__set_comm(th, switch_event->next_comm); - return th; -} - -static struct thread * -threads__findnew_from_wakeup(struct trace_wakeup_event *wakeup_event) -{ - struct thread *th; - - th = threads__findnew_nocomm(wakeup_event->pid, &threads, &last_match); - if (th->comm) - return th; - - thread__set_comm(th, wakeup_event->comm); - - return th; -} - static void latency_switch_event(struct trace_switch_event *switch_event, struct event *event __used, @@ -1089,10 +1059,8 @@ latency_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %Ld < 0 ?\n", delta); - sched_out = threads__findnew_from_ctx(switch_event->prev_pid, - switch_event); - sched_in = threads__findnew_from_ctx(switch_event->next_pid, - switch_event); + sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); + sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); if (!out_events) { @@ -1158,7 +1126,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, if (!wakeup_event->success) return; - wakee = threads__findnew_from_wakeup(wakeup_event); + wakee = threads__findnew(wakeup_event->pid, &threads, &last_match); atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); if (!atoms) { thread_atoms_insert(wakee); @@ -1418,10 +1386,8 @@ map_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %Ld < 0 ?\n", delta); - sched_out = threads__findnew_from_ctx(switch_event->prev_pid, - switch_event); - sched_in = threads__findnew_from_ctx(switch_event->next_pid, - switch_event); + sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); + sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); curr_thread[this_cpu] = sched_in; diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 8bd5ca2d2f28..3b56aebb1f4b 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -6,17 +6,15 @@ #include "util.h" #include "debug.h" -static struct thread *thread__new(pid_t pid, int set_comm) +static struct thread *thread__new(pid_t 
pid) { struct thread *self = calloc(1, sizeof(*self)); if (self != NULL) { self->pid = pid; - if (set_comm) { - self->comm = malloc(32); - if (self->comm) - snprintf(self->comm, 32, ":%d", self->pid); - } + self->comm = malloc(32); + if (self->comm) + snprintf(self->comm, 32, ":%d", self->pid); self->maps = RB_ROOT; INIT_LIST_HEAD(&self->removed_maps); } @@ -52,10 +50,8 @@ static size_t thread__fprintf(struct thread *self, FILE *fp) return ret; } -static struct thread * -__threads__findnew(pid_t pid, struct rb_root *threads, - struct thread **last_match, - int set_comm) +struct thread * +threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) { struct rb_node **p = &threads->rb_node; struct rb_node *parent = NULL; @@ -84,8 +80,7 @@ __threads__findnew(pid_t pid, struct rb_root *threads, p = &(*p)->rb_right; } - th = thread__new(pid, set_comm); - + th = thread__new(pid); if (th != NULL) { rb_link_node(&th->rb_node, parent, p); rb_insert_color(&th->rb_node, threads); @@ -95,19 +90,6 @@ __threads__findnew(pid_t pid, struct rb_root *threads, return th; } -struct thread * -threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) -{ - return __threads__findnew(pid, threads, last_match, 1); -} - -struct thread * -threads__findnew_nocomm(pid_t pid, struct rb_root *threads, - struct thread **last_match) -{ - return __threads__findnew(pid, threads, last_match, 0); -} - struct thread * register_idle_thread(struct rb_root *threads, struct thread **last_match) { diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 75bc843950c4..845d9b62f96f 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -18,9 +18,6 @@ int thread__set_comm(struct thread *self, const char *comm); struct thread * threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match); struct thread * -threads__findnew_nocomm(pid_t pid, struct rb_root *threads, - struct thread **last_match); -struct thread * register_idle_thread(struct rb_root *threads, struct thread **last_match); void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); -- cgit v1.2.3 From 26dd2cb074d9dc41c9e3cddd7bf175fd0a41febc Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 8 Oct 2009 22:07:29 +0200 Subject: perf tools: Provide backward compatibility with previous perf.data version We have merged the trace.info file into perf.data by adding one section in the perf headers. This makes it incompatible with previous version: the new perf tools can't read the older perf.data. To support the previous format, we check the headers size. If they have the same size than in the previous format, then ignore the trace info section that doesn't exist. 
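The check itself boils down to comparing the on-disk header size against the current layout; a commented sketch of the logic added in the hunk below:

	if (f_header.size != sizeof(f_header)) {
		/* old perf.data: the header ends right where trace_info starts */
		if (f_header.size == offsetof(typeof(f_header), trace_info))
			f_header.trace_info.size = 0;	/* treat the section as empty */
		else
			die("incompatible file format");
	}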
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras LKML-Reference: <1255032449-12022-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/header.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 212fade7ee74..9aae360c0f28 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -287,10 +287,16 @@ struct perf_header *perf_header__read(int fd) do_read(fd, &f_header, sizeof(f_header)); if (f_header.magic != PERF_MAGIC || - f_header.size != sizeof(f_header) || f_header.attr_size != sizeof(f_attr)) die("incompatible file format"); + if (f_header.size != sizeof(f_header)) { + /* Support the previous format */ + if (f_header.size == offsetof(typeof(f_header), trace_info)) + f_header.trace_info.size = 0; + else + die("incompatible file format"); + } nr_attrs = f_header.attrs.size / sizeof(f_attr); lseek(fd, f_header.attrs.offset, SEEK_SET); -- cgit v1.2.3 From 04a705df47d1ea27ca2b066f24b1951c51792d0d Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 6 Oct 2009 16:42:08 +0200 Subject: perf_events: Check for filters on fixed counter events Intel fixed counters do not support all the filters possible with a generic counter. Thus, if a fixed counter event is passed but with certain filters set, then the fixed_mode_idx() function must fail and the event must be measured in a generic counter instead. Reject filters are: inv, edge, cnt-mask. Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <1254840129-6198-2-git-send-email-eranian@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/perf_event.h | 13 ++++++++++++- arch/x86/kernel/cpu/perf_event.c | 6 ++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index ad7ce3fd5065..8d9f8548a870 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -28,9 +28,20 @@ */ #define ARCH_PERFMON_EVENT_MASK 0xffff +/* + * filter mask to validate fixed counter events. + * the following filters disqualify for fixed counters: + * - inv + * - edge + * - cnt-mask + * The other filters are supported by fixed counters. + * The any-thread option is supported starting with v3. 
+ */ +#define ARCH_PERFMON_EVENT_FILTER_MASK 0xff840000 + #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index b5801c311846..1d16bd69551e 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1349,6 +1349,12 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) if (!x86_pmu.num_events_fixed) return -1; + /* + * fixed counters do not take all possible filters + */ + if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK) + return -1; + if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) return X86_PMC_IDX_FIXED_INSTRUCTIONS; if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) -- cgit v1.2.3 From b690081d4d3f6a23541493f1682835c3cd5c54a1 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 6 Oct 2009 16:42:09 +0200 Subject: perf_events: Add event constraints support for Intel processors On some Intel processors, not all events can be measured in all counters. Some events can only be measured in one particular counter, for instance. Assigning an event to the wrong counter does not crash the machine but this yields bogus counts, i.e., silent error. This patch changes the event to counter assignment logic to take into account event constraints for Intel P6, Core and Nehalem processors. There is no contraints on Intel Atom. There are constraints on Intel Yonah (Core Duo) but they are not provided in this patch given that this processor is not yet supported by perf_events. As a result of the constraints, it is possible for some event groups to never actually be loaded onto the PMU if they contain two events which can only be measured on a single counter. That situation can be detected with the scaling information extracted with read(). 
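Each constraint simply pairs an event code with a bitmask of the counters allowed to host it, and the allocator falls back to the generic free-counter search when no constraint matches. A short illustration using entries from the tables in the patch below:

	struct event_constraint {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		int		code;
	};

	EVENT_CONSTRAINT(0xcb, 0x1),	/* Core: MEM_LOAD_RETIRED, counter 0 only */
	EVENT_CONSTRAINT(0x40, 0x3),	/* Nehalem: L1D_CACHE_LD, counters 0-1 */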
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <1254840129-6198-3-git-send-email-eranian@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 109 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 105 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 1d16bd69551e..9c758548a0e6 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -77,6 +77,18 @@ struct cpu_hw_events { struct debug_store *ds; }; +struct event_constraint { + unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + int code; +}; + +#define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) } +#define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 } + +#define for_each_event_constraint(e, c) \ + for ((e) = (c); (e)->idxmsk[0]; (e)++) + + /* * struct x86_pmu - generic x86 pmu */ @@ -102,6 +114,7 @@ struct x86_pmu { u64 intel_ctrl; void (*enable_bts)(u64 config); void (*disable_bts)(void); + int (*get_event_idx)(struct hw_perf_event *hwc); }; static struct x86_pmu x86_pmu __read_mostly; @@ -110,6 +123,8 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; +static const struct event_constraint *event_constraint; + /* * Not sure about some of these */ @@ -155,6 +170,16 @@ static u64 p6_pmu_raw_event(u64 hw_event) return hw_event & P6_EVNTSEL_MASK; } +static const struct event_constraint intel_p6_event_constraints[] = +{ + EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */ + EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ + EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */ + EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ + EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ + EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ + EVENT_CONSTRAINT_END +}; /* * Intel PerfMon v3. Used on Core2 and later. @@ -170,6 +195,35 @@ static const u64 intel_perfmon_event_map[] = [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, }; +static const struct event_constraint intel_core_event_constraints[] = +{ + EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ + EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ + EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ + EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ + EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ + EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */ + EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ + EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */ + EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */ + EVENT_CONSTRAINT_END +}; + +static const struct event_constraint intel_nehalem_event_constraints[] = +{ + EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ + EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ + EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ + EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */ + EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */ + EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */ + EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ + EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */ + EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */ + EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */ + EVENT_CONSTRAINT_END +}; + static u64 intel_pmu_event_map(int hw_event) { return intel_perfmon_event_map[hw_event]; @@ -932,6 +986,8 @@ static int __hw_perf_event_init(struct perf_event *event) */ hwc->config = ARCH_PERFMON_EVENTSEL_INT; + hwc->idx = -1; + /* * Count user and OS events unless requested not to. 
*/ @@ -1365,6 +1421,45 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) return -1; } +/* + * generic counter allocator: get next free counter + */ +static int gen_get_event_idx(struct hw_perf_event *hwc) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int idx; + + idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events); + return idx == x86_pmu.num_events ? -1 : idx; +} + +/* + * intel-specific counter allocator: check event constraints + */ +static int intel_get_event_idx(struct hw_perf_event *hwc) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + const struct event_constraint *event_constraint; + int i, code; + + if (!event_constraint) + goto skip; + + code = hwc->config & 0xff; + + for_each_event_constraint(event_constraint, event_constraint) { + if (code == event_constraint->code) { + for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) { + if (!test_and_set_bit(i, cpuc->used_mask)) + return i; + } + return -1; + } + } +skip: + return gen_get_event_idx(hwc); +} + /* * Find a PMC slot for the freshly enabled / scheduled in event: */ @@ -1402,11 +1497,10 @@ static int x86_pmu_enable(struct perf_event *event) } else { idx = hwc->idx; /* Try to get the previous generic event again */ - if (test_and_set_bit(idx, cpuc->used_mask)) { + if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) { try_generic: - idx = find_first_zero_bit(cpuc->used_mask, - x86_pmu.num_events); - if (idx == x86_pmu.num_events) + idx = x86_pmu.get_event_idx(hwc); + if (idx == -1) return -EAGAIN; set_bit(idx, cpuc->used_mask); @@ -1883,6 +1977,7 @@ static struct x86_pmu p6_pmu = { */ .event_bits = 32, .event_mask = (1ULL << 32) - 1, + .get_event_idx = intel_get_event_idx, }; static struct x86_pmu intel_pmu = { @@ -1906,6 +2001,7 @@ static struct x86_pmu intel_pmu = { .max_period = (1ULL << 31) - 1, .enable_bts = intel_pmu_enable_bts, .disable_bts = intel_pmu_disable_bts, + .get_event_idx = intel_get_event_idx, }; static struct x86_pmu amd_pmu = { @@ -1926,6 +2022,7 @@ static struct x86_pmu amd_pmu = { .apic = 1, /* use highest bit to detect overflow */ .max_period = (1ULL << 47) - 1, + .get_event_idx = gen_get_event_idx, }; static int p6_pmu_init(void) @@ -1938,10 +2035,12 @@ static int p6_pmu_init(void) case 7: case 8: case 11: /* Pentium III */ + event_constraint = intel_p6_event_constraints; break; case 9: case 13: /* Pentium M */ + event_constraint = intel_p6_event_constraints; break; default: pr_cont("unsupported p6 CPU model %d ", @@ -2013,12 +2112,14 @@ static int intel_pmu_init(void) sizeof(hw_cache_event_ids)); pr_cont("Core2 events, "); + event_constraint = intel_core_event_constraints; break; default: case 26: memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + event_constraint = intel_nehalem_event_constraints; pr_cont("Nehalem/Corei7 events, "); break; case 28: -- cgit v1.2.3 From fe9081cc9bdabb0be953a39ad977cea14e35bce5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 8 Oct 2009 11:56:07 +0200 Subject: perf, x86: Add simple group validation Refuse to add events when the group wouldn't fit onto the PMU anymore. Naive implementation. 
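The implementation amounts to a dry run of the counter-scheduling path against a throw-away cpu_hw_events: the leader, each sibling and the new event are scheduled into that scratch state, and the group is refused with -ENOSPC if they cannot all be placed. A condensed sketch of the idea (simplified; the actual validate_event()/validate_group() helpers are in the hunk below):

	struct cpu_hw_events fake_pmu;
	struct hw_perf_event fake_event = leader->hw;

	memset(&fake_pmu, 0, sizeof(fake_pmu));
	/* try to find a slot in the scratch state, not on the real PMU */
	if (x86_schedule_event(&fake_pmu, &fake_event) < 0)
		return -ENOSPC;
	/* ... repeat for every sibling and for the event being added ... */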
Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: <1254911461.26976.239.camel@twins> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 90 ++++++++++++++++++++++++++++++---------- 1 file changed, 69 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 9c758548a0e6..9961d845719d 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -114,7 +114,8 @@ struct x86_pmu { u64 intel_ctrl; void (*enable_bts)(u64 config); void (*disable_bts)(void); - int (*get_event_idx)(struct hw_perf_event *hwc); + int (*get_event_idx)(struct cpu_hw_events *cpuc, + struct hw_perf_event *hwc); }; static struct x86_pmu x86_pmu __read_mostly; @@ -523,7 +524,7 @@ static u64 intel_pmu_raw_event(u64 hw_event) #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL #define CORE_EVNTSEL_INV_MASK 0x00800000ULL -#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL +#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL #define CORE_EVNTSEL_MASK \ (CORE_EVNTSEL_EVENT_MASK | \ @@ -1390,8 +1391,7 @@ static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx) x86_pmu_enable_event(hwc, idx); } -static int -fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) +static int fixed_mode_idx(struct hw_perf_event *hwc) { unsigned int hw_event; @@ -1424,9 +1424,9 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) /* * generic counter allocator: get next free counter */ -static int gen_get_event_idx(struct hw_perf_event *hwc) +static int +gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx; idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events); @@ -1436,16 +1436,16 @@ static int gen_get_event_idx(struct hw_perf_event *hwc) /* * intel-specific counter allocator: check event constraints */ -static int intel_get_event_idx(struct hw_perf_event *hwc) +static int +intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); const struct event_constraint *event_constraint; int i, code; if (!event_constraint) goto skip; - code = hwc->config & 0xff; + code = hwc->config & CORE_EVNTSEL_EVENT_MASK; for_each_event_constraint(event_constraint, event_constraint) { if (code == event_constraint->code) { @@ -1457,26 +1457,22 @@ static int intel_get_event_idx(struct hw_perf_event *hwc) } } skip: - return gen_get_event_idx(hwc); + return gen_get_event_idx(cpuc, hwc); } -/* - * Find a PMC slot for the freshly enabled / scheduled in event: - */ -static int x86_pmu_enable(struct perf_event *event) +static int +x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct hw_perf_event *hwc = &event->hw; int idx; - idx = fixed_mode_idx(event, hwc); + idx = fixed_mode_idx(hwc); if (idx == X86_PMC_IDX_FIXED_BTS) { /* BTS is already occupied. 
*/ if (test_and_set_bit(idx, cpuc->used_mask)) return -EAGAIN; hwc->config_base = 0; - hwc->event_base = 0; + hwc->event_base = 0; hwc->idx = idx; } else if (idx >= 0) { /* @@ -1499,17 +1495,33 @@ static int x86_pmu_enable(struct perf_event *event) /* Try to get the previous generic event again */ if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) { try_generic: - idx = x86_pmu.get_event_idx(hwc); + idx = x86_pmu.get_event_idx(cpuc, hwc); if (idx == -1) return -EAGAIN; set_bit(idx, cpuc->used_mask); hwc->idx = idx; } - hwc->config_base = x86_pmu.eventsel; - hwc->event_base = x86_pmu.perfctr; + hwc->config_base = x86_pmu.eventsel; + hwc->event_base = x86_pmu.perfctr; } + return idx; +} + +/* + * Find a PMC slot for the freshly enabled / scheduled in event: + */ +static int x86_pmu_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx; + + idx = x86_schedule_event(cpuc, hwc); + if (idx < 0) + return idx; + perf_events_lapic_init(); x86_pmu.disable(hwc, idx); @@ -2212,11 +2224,47 @@ static const struct pmu pmu = { .unthrottle = x86_pmu_unthrottle, }; +static int +validate_event(struct cpu_hw_events *cpuc, struct perf_event *event) +{ + struct hw_perf_event fake_event = event->hw; + + if (event->pmu != &pmu) + return 0; + + return x86_schedule_event(cpuc, &fake_event); +} + +static int validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct cpu_hw_events fake_pmu; + + memset(&fake_pmu, 0, sizeof(fake_pmu)); + + if (!validate_event(&fake_pmu, leader)) + return -ENOSPC; + + list_for_each_entry(sibling, &leader->sibling_list, group_entry) { + if (!validate_event(&fake_pmu, sibling)) + return -ENOSPC; + } + + if (!validate_event(&fake_pmu, event)) + return -ENOSPC; + + return 0; +} + const struct pmu *hw_perf_event_init(struct perf_event *event) { int err; err = __hw_perf_event_init(event); + if (!err) { + if (event->group_leader != event) + err = validate_group(event); + } if (err) { if (event->destroy) event->destroy(event); -- cgit v1.2.3 From 3bb258bf430d29a24350fe4f44f8bf07b7b7a8f6 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 4 Oct 2009 17:53:29 -0700 Subject: ftrace.c: Add #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - Remove prefixes from pr_, use pr_fmt(fmt). No change in output. Signed-off-by: Joe Perches Acked-by: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <9b377eefae9e28c599dd4a17bdc81172965e9931.1254701151.git.joe@perches.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/ftrace.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 9dbb527e1652..25e6f5fc4b1e 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -9,6 +9,8 @@ * the dangers of modifying code on the run. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -336,15 +338,15 @@ int __init ftrace_dyn_arch_init(void *data) switch (faulted) { case 0: - pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); + pr_info("converting mcount calls to 0f 1f 44 00 00\n"); memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); break; case 1: - pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); + pr_info("converting mcount calls to 66 66 66 66 90\n"); memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); break; case 2: - pr_info("ftrace: converting mcount calls to jmp . 
+ 5\n"); + pr_info("converting mcount calls to jmp . + 5\n"); memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); break; } -- cgit v1.2.3 From 3c355863fb32070a2800f41106519c5c3038623a Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 4 Oct 2009 17:53:40 -0700 Subject: testmmiotrace.c: Add and use pr_fmt(fmt) - Add #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt. - Strip MODULE_NAME from pr_s. - Remove MODULE_NAME definition. Signed-off-by: Joe Perches LKML-Reference: <3bb66cc7f85f77b9416902e1be7076f7e3f4ad48.1254701151.git.joe@perches.com> Signed-off-by: Ingo Molnar --- arch/x86/mm/testmmiotrace.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c index 427fd1b56df5..8565d944f7cf 100644 --- a/arch/x86/mm/testmmiotrace.c +++ b/arch/x86/mm/testmmiotrace.c @@ -1,12 +1,13 @@ /* * Written by Pekka Paalanen, 2008-2009 */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include -#define MODULE_NAME "testmmiotrace" - static unsigned long mmio_address; module_param(mmio_address, ulong, 0); MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB " @@ -30,7 +31,7 @@ static unsigned v32(unsigned i) static void do_write_test(void __iomem *p) { unsigned int i; - pr_info(MODULE_NAME ": write test.\n"); + pr_info("write test.\n"); mmiotrace_printk("Write test.\n"); for (i = 0; i < 256; i++) @@ -47,7 +48,7 @@ static void do_read_test(void __iomem *p) { unsigned int i; unsigned errs[3] = { 0 }; - pr_info(MODULE_NAME ": read test.\n"); + pr_info("read test.\n"); mmiotrace_printk("Read test.\n"); for (i = 0; i < 256; i++) @@ -68,7 +69,7 @@ static void do_read_test(void __iomem *p) static void do_read_far_test(void __iomem *p) { - pr_info(MODULE_NAME ": read far test.\n"); + pr_info("read far test.\n"); mmiotrace_printk("Read far test.\n"); ioread32(p + read_far); @@ -78,7 +79,7 @@ static void do_test(unsigned long size) { void __iomem *p = ioremap_nocache(mmio_address, size); if (!p) { - pr_err(MODULE_NAME ": could not ioremap, aborting.\n"); + pr_err("could not ioremap, aborting.\n"); return; } mmiotrace_printk("ioremap returned %p.\n", p); @@ -94,24 +95,22 @@ static int __init init(void) unsigned long size = (read_far) ? 
(8 << 20) : (16 << 10); if (mmio_address == 0) { - pr_err(MODULE_NAME ": you have to use the module argument " - "mmio_address.\n"); - pr_err(MODULE_NAME ": DO NOT LOAD THIS MODULE UNLESS" - " YOU REALLY KNOW WHAT YOU ARE DOING!\n"); + pr_err("you have to use the module argument mmio_address.\n"); + pr_err("DO NOT LOAD THIS MODULE UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!\n"); return -ENXIO; } - pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI " - "address space, and writing 16 kB of rubbish in there.\n", - size >> 10, mmio_address); + pr_warning("WARNING: mapping %lu kB @ 0x%08lx in PCI address space, " + "and writing 16 kB of rubbish in there.\n", + size >> 10, mmio_address); do_test(size); - pr_info(MODULE_NAME ": All done.\n"); + pr_info("All done.\n"); return 0; } static void __exit cleanup(void) { - pr_debug(MODULE_NAME ": unloaded.\n"); + pr_debug("unloaded.\n"); } module_init(init); -- cgit v1.2.3 From 7e4ff9e3e8f88de8a8536f43294cd32b4e7d9123 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Mon, 12 Oct 2009 07:56:03 +0200 Subject: perf tools: Fix counter sample frequency breakage Commit 42e59d7d19dc4b4 switched to a default sample frequency of 1KHz, which overrides any user supplied count, causing sched, top and timechart to miss events due to their discrete events being flagged PERF_SAMPLE_PERIOD. Override default sample frequency when the user profides a period count, and make both record and top honor that user supplied option. Signed-off-by: Mike Galbraith Cc: Peter Zijlstra Cc: Arjan van de Ven LKML-Reference: <1255326963.15107.2.camel@marge.simson.net> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 14 +++++++++++++- tools/perf/builtin-top.c | 28 +++++++++++++++++++++------- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 59af03d80d07..4e3a374e7aa7 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -26,7 +26,7 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS]; -static long default_interval = 100000; +static long default_interval = 0; static int nr_cpus = 0; static unsigned int page_size; @@ -730,6 +730,18 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) attrs[0].config = PERF_COUNT_HW_CPU_CYCLES; } + /* + * User specified count overrides default frequency. 
+ */ + if (default_interval) + freq = 0; + else if (freq) { + default_interval = freq; + } else { + fprintf(stderr, "frequency and count are zero, aborting\n"); + exit(EXIT_FAILURE); + } + for (counter = 0; counter < nr_counters; counter++) { if (attrs[counter].sample_period) continue; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index d978dc99236c..c0f69e80b2cc 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -57,7 +57,7 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static int system_wide = 0; -static int default_interval = 100000; +static int default_interval = 0; static int count_filter = 5; static int print_entries = 15; @@ -975,7 +975,13 @@ static void start_counter(int i, int counter) attr = attrs + counter; attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; - attr->freq = freq; + + if (freq) { + attr->sample_type |= PERF_SAMPLE_PERIOD; + attr->freq = 1; + attr->sample_freq = freq; + } + attr->inherit = (cpu < 0) && inherit; try_again: @@ -1130,11 +1136,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) if (argc) usage_with_options(top_usage, options); - if (freq) { - default_interval = freq; - freq = 1; - } - /* CPU and PID are mutually exclusive */ if (target_pid != -1 && profile_cpu != -1) { printf("WARNING: PID switch overriding CPU\n"); @@ -1151,6 +1152,19 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) parse_symbols(); parse_source(sym_filter_entry); + + /* + * User specified count overrides default frequency. + */ + if (default_interval) + freq = 0; + else if (freq) { + default_interval = freq; + } else { + fprintf(stderr, "frequency and count are zero, aborting\n"); + exit(EXIT_FAILURE); + } + /* * Fill in the ones not specifically initialized via -c: */ -- cgit v1.2.3 From 55ffb7a6bd45d0083ffb132381cb46964a4afe01 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sat, 10 Oct 2009 14:46:04 +0200 Subject: perf sched: Add -C option to measure on a specific CPU To refresh, trying to sched record only one CPU results in bogus latencies as below. I fixed^Wmade it stop doing the bad thing today, by following task migration events properly. Before: marge:/root/tmp # taskset -c 1 perf sched record -C 0 -- sleep 10 marge:/root/tmp # perf sched lat ----------------------------------------------------------------------------------------- Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | ----------------------------------------------------------------------------------------- Xorg:4943 | 1.290 ms | 1 | avg: 1670.132 ms | max: 1670.132 ms | hald-addon-stor:3569 | 0.091 ms | 3 | avg: 658.609 ms | max: 1975.797 ms | hald-addon-stor:3573 | 0.209 ms | 4 | avg: 499.138 ms | max: 1990.565 ms | audispd:4270 | 0.012 ms | 1 | avg: 0.015 ms | max: 0.015 ms | .... 
marge:/root/tmp # perf sched trace|grep 'Xorg:4943' swapper-0 [000] 401.184013288: sched_stat_runtime: task: Xorg:4943 runtime: 1233188 [ns], vruntime: 19105169779 [ns] rt2870TimerQHan-4947 [000] 402.854140127: sched_stat_wait: task: Xorg:4943 wait: 580073 [ns] rt2870TimerQHan-4947 [000] 402.854141770: sched_migrate_task: task Xorg:4943 [140] from: 1 to: 0 rt2870TimerQHan-4947 [000] 402.854143854: sched_stat_wait: task: Xorg:4943 wait: 0 [ns] rt2870TimerQHan-4947 [000] 402.854145397: sched_switch: task rt2870TimerQHan:4947 [140] (D) ==> Xorg:4943 [140] Xorg-4943 [000] 402.854193133: sched_stat_runtime: task: Xorg:4943 runtime: 56546 [ns], vruntime: 11766332500 [ns] Xorg-4943 [000] 402.854196842: sched_switch: task Xorg:4943 [140] (S) ==> swapper:0 [140] After: marge:/root/tmp # taskset -c 1 perf sched record -C 0 -- sleep 10 marge:/root/tmp # perf sched lat ----------------------------------------------------------------------------------------- Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | ----------------------------------------------------------------------------------------- amarokapp:11150 | 271.297 ms | 878 | avg: 0.130 ms | max: 1.057 ms | konsole:5965 | 1.370 ms | 12 | avg: 0.092 ms | max: 0.855 ms | Xorg:4943 | 179.980 ms | 1109 | avg: 0.087 ms | max: 1.206 ms | hald-addon-stor:3574 | 0.212 ms | 9 | avg: 0.040 ms | max: 0.169 ms | hald-addon-stor:3570 | 0.223 ms | 9 | avg: 0.037 ms | max: 0.223 ms | klauncher:5864 | 0.550 ms | 8 | avg: 0.032 ms | max: 0.048 ms | The 'Maximum delay ms' results are now sane. Signed-off-by: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-sched.c | 101 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 100 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 6b00529ce348..387a44234368 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -33,6 +33,8 @@ static u64 sample_type; static char default_sort_order[] = "avg, max, switch, runtime"; static char *sort_order = default_sort_order; +static int profile_cpu = -1; + static char *cwd; static int cwdlen; @@ -75,6 +77,7 @@ enum sched_event_type { SCHED_EVENT_RUN, SCHED_EVENT_SLEEP, SCHED_EVENT_WAKEUP, + SCHED_EVENT_MIGRATION, }; struct sched_atom { @@ -399,6 +402,8 @@ process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom) ret = sem_post(atom->wait_sem); BUG_ON(ret); break; + case SCHED_EVENT_MIGRATION: + break; default: BUG_ON(1); } @@ -746,6 +751,22 @@ struct trace_fork_event { u32 child_pid; }; +struct trace_migrate_task_event { + u32 size; + + u16 common_type; + u8 common_flags; + u8 common_preempt_count; + u32 common_pid; + u32 common_tgid; + + char comm[16]; + u32 pid; + + u32 prio; + u32 cpu; +}; + struct trace_sched_handler { void (*switch_event)(struct trace_switch_event *, struct event *, @@ -770,6 +791,12 @@ struct trace_sched_handler { int cpu, u64 timestamp, struct thread *thread); + + void (*migrate_task_event)(struct trace_migrate_task_event *, + struct event *, + int cpu, + u64 timestamp, + struct thread *thread); }; @@ -1140,7 +1167,12 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, atom = list_entry(atoms->work_list.prev, struct work_atom, list); - if (atom->state != THREAD_SLEEPING) + /* + * You WILL be missing events if you've recorded only + * one CPU, or are only looking at only one, so don't + * make useless noise. 
+ */ + if (profile_cpu == -1 && atom->state != THREAD_SLEEPING) nr_state_machine_bugs++; nr_timestamps++; @@ -1153,11 +1185,51 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, atom->wake_up_time = timestamp; } +static void +latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, + struct event *__event __used, + int cpu __used, + u64 timestamp, + struct thread *thread __used) +{ + struct work_atoms *atoms; + struct work_atom *atom; + struct thread *migrant; + + /* + * Only need to worry about migration when profiling one CPU. + */ + if (profile_cpu == -1) + return; + + migrant = threads__findnew(migrate_task_event->pid, &threads, &last_match); + atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); + if (!atoms) { + thread_atoms_insert(migrant); + register_pid(migrant->pid, migrant->comm); + atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); + if (!atoms) + die("migration-event: Internal tree error"); + add_sched_out_event(atoms, 'R', timestamp); + } + + BUG_ON(list_empty(&atoms->work_list)); + + atom = list_entry(atoms->work_list.prev, struct work_atom, list); + atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; + + nr_timestamps++; + + if (atom->sched_out_time > timestamp) + nr_unordered_timestamps++; +} + static struct trace_sched_handler lat_ops = { .wakeup_event = latency_wakeup_event, .switch_event = latency_switch_event, .runtime_event = latency_runtime_event, .fork_event = latency_fork_event, + .migrate_task_event = latency_migrate_task_event, }; static void output_lat_thread(struct work_atoms *work_list) @@ -1517,6 +1589,26 @@ process_sched_exit_event(struct event *event, printf("sched_exit event %p\n", event); } +static void +process_sched_migrate_task_event(struct raw_event_sample *raw, + struct event *event, + int cpu __used, + u64 timestamp __used, + struct thread *thread __used) +{ + struct trace_migrate_task_event migrate_task_event; + + FILL_COMMON_FIELDS(migrate_task_event, event, raw->data); + + FILL_ARRAY(migrate_task_event, comm, event, raw->data); + FILL_FIELD(migrate_task_event, pid, event, raw->data); + FILL_FIELD(migrate_task_event, prio, event, raw->data); + FILL_FIELD(migrate_task_event, cpu, event, raw->data); + + if (trace_handler->migrate_task_event) + trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread); +} + static void process_raw_event(event_t *raw_event __used, void *more_data, int cpu, u64 timestamp, struct thread *thread) @@ -1540,6 +1632,8 @@ process_raw_event(event_t *raw_event __used, void *more_data, process_sched_fork_event(raw, event, cpu, timestamp, thread); if (!strcmp(event->name, "sched_process_exit")) process_sched_exit_event(event, cpu, timestamp, thread); + if (!strcmp(event->name, "sched_migrate_task")) + process_sched_migrate_task_event(raw, event, cpu, timestamp, thread); } static int @@ -1589,6 +1683,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return -1; } + if (profile_cpu != -1 && profile_cpu != (int) cpu) + return 0; + process_raw_event(event, more_data, cpu, timestamp, thread); return 0; @@ -1771,6 +1868,8 @@ static const struct option latency_options[] = { "sort by key(s): runtime, switch, avg, max"), OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), + OPT_INTEGER('C', "CPU", &profile_cpu, + "CPU to profile on"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_END() -- cgit v1.2.3 From 
369bc18f9a6c4e2686204c1d7476ab684a720968 Mon Sep 17 00:00:00 2001 From: Stefan Assmann Date: Mon, 12 Oct 2009 22:17:21 +0200 Subject: ftrace: add kernel command line graph function filtering Add a command line parameter to allow limiting the function graphs that are traced on boot up from the given top-level callers , when ftrace=function_graph is specified. This patch adds the following command line option: ftrace_graph_filter=function-list Where function-list is a comma separated list of functions to filter. [fweisbec@gmail.com: picked the documentation changes from the v2 patch] Signed-off-by: Stefan Assmann Acked-by: Steven Rostedt LKML-Reference: <4AD2DEB9.2@redhat.com> Signed-off-by: Frederic Weisbecker --- Documentation/kernel-parameters.txt | 7 +++++++ kernel/trace/ftrace.c | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 6fa7292947e5..1dc4b9cc20e5 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -778,6 +778,13 @@ and is between 256 and 4096 characters. It is defined in the file by the set_ftrace_notrace file in the debugfs tracing directory. + ftrace_graph_filter=[function-list] + [FTRACE] Limit the top level callers functions traced + by the function graph tracer at boot up. + function-list is a comma separated list of functions + that can be changed at run time by the + set_graph_function file in the debugfs tracing directory. + gamecon.map[2|3]= [HW,JOY] Multisystem joystick and NES/SNES/PSX pad support via parallel port (up to 5 devices per port) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 9a72853a8f0a..91283d40821e 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -78,6 +78,10 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +static int ftrace_set_func(unsigned long *array, int *idx, char *buffer); +#endif + static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) { struct ftrace_ops *op = ftrace_list; @@ -2248,6 +2252,7 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset) #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; +static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; static int __init set_ftrace_notrace(char *str) { @@ -2263,6 +2268,31 @@ static int __init set_ftrace_filter(char *str) } __setup("ftrace_filter=", set_ftrace_filter); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +static int __init set_graph_function(char *str) +{ + strncpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); + return 1; +} +__setup("ftrace_graph_filter=", set_graph_function); + +static void __init set_ftrace_early_graph(char *buf) +{ + int ret; + char *func; + + while (buf) { + func = strsep(&buf, ","); + /* we allow only one expression at a time */ + ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, + func); + if (ret) + printk(KERN_DEBUG "ftrace: function %s not " + "traceable\n", func); + } +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + static void __init set_ftrace_early_filter(char *buf, int enable) { char *func; @@ -2279,6 +2309,10 @@ static void __init set_ftrace_early_filters(void) set_ftrace_early_filter(ftrace_filter_buf, 1); if 
(ftrace_notrace_buf[0]) set_ftrace_early_filter(ftrace_notrace_buf, 0); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + if (ftrace_graph_buf[0]) + set_ftrace_early_graph(ftrace_graph_buf); +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ } static int -- cgit v1.2.3 From 7a693d3f0d10f978ebdf3082c41404ab97106567 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 13 Oct 2009 08:16:30 +0200 Subject: perf_events, x86: Fix event constraints code There was namespace overlap due to a rename i did - this caused the following build warning, reported by Stephen Rothwell against linux-next x86_64 allmodconfig: arch/x86/kernel/cpu/perf_event.c: In function 'intel_get_event_idx': arch/x86/kernel/cpu/perf_event.c:1445: warning: 'event_constraint' is used uninitialized in this function This is a real bug not just a warning: fix it by renaming the global event-constraints table pointer to 'event_constraints'. Reported-by: Stephen Rothwell Cc: Stephane Eranian Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091013144223.369d616d.sfr@canb.auug.org.au> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 9961d845719d..2e20bca3cca1 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -124,7 +124,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; -static const struct event_constraint *event_constraint; +static const struct event_constraint *event_constraints; /* * Not sure about some of these @@ -1442,12 +1442,12 @@ intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) const struct event_constraint *event_constraint; int i, code; - if (!event_constraint) + if (!event_constraints) goto skip; code = hwc->config & CORE_EVNTSEL_EVENT_MASK; - for_each_event_constraint(event_constraint, event_constraint) { + for_each_event_constraint(event_constraint, event_constraints) { if (code == event_constraint->code) { for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) { if (!test_and_set_bit(i, cpuc->used_mask)) @@ -2047,12 +2047,12 @@ static int p6_pmu_init(void) case 7: case 8: case 11: /* Pentium III */ - event_constraint = intel_p6_event_constraints; + event_constraints = intel_p6_event_constraints; break; case 9: case 13: /* Pentium M */ - event_constraint = intel_p6_event_constraints; + event_constraints = intel_p6_event_constraints; break; default: pr_cont("unsupported p6 CPU model %d ", @@ -2124,14 +2124,14 @@ static int intel_pmu_init(void) sizeof(hw_cache_event_ids)); pr_cont("Core2 events, "); - event_constraint = intel_core_event_constraints; + event_constraints = intel_core_event_constraints; break; default: case 26: memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - event_constraint = intel_nehalem_event_constraints; + event_constraints = intel_nehalem_event_constraints; pr_cont("Nehalem/Corei7 events, "); break; case 28: -- cgit v1.2.3 From aef6f81b55f462082699c06e8e67e6eb5630ed45 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 12 Oct 2009 22:23:24 +0200 Subject: tracing: Rename set_ftrace to set_bootup_ftrace Do this rename because set_ftrace is too much generic and not enough self-explainable as a name. 
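For reference, set_bootup_ftrace() parses the "ftrace=" boot option; combined with the ftrace_graph_filter= option added earlier in this series, a boot command line would look like this (the function names are only examples):

  ftrace=function_graph ftrace_graph_filter=sys_open,kthread_run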
Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Li Zefan --- kernel/trace/trace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 45068269ebb1..866daf8497f1 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf); static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; static char *default_bootup_tracer; -static int __init set_ftrace(char *str) +static int __init set_bootup_ftrace(char *str) { strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); default_bootup_tracer = bootup_tracer_buf; @@ -137,7 +137,7 @@ static int __init set_ftrace(char *str) ring_buffer_expanded = 1; return 1; } -__setup("ftrace=", set_ftrace); +__setup("ftrace=", set_bootup_ftrace); static int __init set_ftrace_dump_on_oops(char *str) { -- cgit v1.2.3 From bf7c5b43a12614847b83f507fb169ad30640e406 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 12 Oct 2009 22:31:32 +0200 Subject: tracing: Remove unused ftrace_trace_addr helper Remove the ftrace_trace_addr() function, as only its off-case is implemented and it currently has no users. But we keep the ftrace_graph_addr() off-case, in case someone comes to use the function graph tracer and wants to benefit from top-level caller filtering. Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Li Zefan --- kernel/trace/trace.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 365fb19d9e11..f22a7ac32380 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -483,10 +483,6 @@ static inline int ftrace_graph_addr(unsigned long addr) return 0; } #else -static inline int ftrace_trace_addr(unsigned long addr) -{ - return 1; -} static inline int ftrace_graph_addr(unsigned long addr) { return 1; -- cgit v1.2.3 From cfed95a693e1ea5d08b9c9019bc30e448437ee2f Mon Sep 17 00:00:00 2001 From: Vincent Legoll Date: Tue, 13 Oct 2009 10:18:16 +0200 Subject: perf tools: Do not manually count string lengths Use strlen & macros instead of manually counting string lengths, as this is error-prone and may lead to bugs. Signed-off-by: Vincent Legoll Cc: Linus Torvalds LKML-Reference: <4727185d0910130118m5387058dndb02ac9b384af9f0@mail.gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/perf.c | 16 ++++++++-------- tools/perf/util/cache.h | 5 +++++ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 19fc7feb9d59..624e62d9d1e0 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -89,8 +89,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) /* * Check remaining flags. 
*/ - if (!prefixcmp(cmd, "--exec-path")) { - cmd += 11; + if (!prefixcmp(cmd, CMD_EXEC_PATH)) { + cmd += strlen(CMD_EXEC_PATH); if (*cmd == '=') perf_set_argv_exec_path(cmd + 1); else { @@ -117,8 +117,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) (*argv)++; (*argc)--; handled++; - } else if (!prefixcmp(cmd, "--perf-dir=")) { - setenv(PERF_DIR_ENVIRONMENT, cmd + 10, 1); + } else if (!prefixcmp(cmd, CMD_PERF_DIR)) { + setenv(PERF_DIR_ENVIRONMENT, cmd + strlen(CMD_PERF_DIR), 1); if (envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--work-tree")) { @@ -131,8 +131,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) *envchanged = 1; (*argv)++; (*argc)--; - } else if (!prefixcmp(cmd, "--work-tree=")) { - setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + 12, 1); + } else if (!prefixcmp(cmd, CMD_WORK_TREE)) { + setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + strlen(CMD_WORK_TREE), 1); if (envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--debugfs-dir")) { @@ -146,8 +146,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) *envchanged = 1; (*argv)++; (*argc)--; - } else if (!prefixcmp(cmd, "--debugfs-dir=")) { - strncpy(debugfs_mntpt, cmd + 14, MAXPATHLEN); + } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) { + strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN); debugfs_mntpt[MAXPATHLEN - 1] = '\0'; if (envchanged) *envchanged = 1; diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h index f26172c0c919..918eb376abe3 100644 --- a/tools/perf/util/cache.h +++ b/tools/perf/util/cache.h @@ -5,6 +5,11 @@ #include "strbuf.h" #include "../perf.h" +#define CMD_EXEC_PATH "--exec-path" +#define CMD_PERF_DIR "--perf-dir=" +#define CMD_WORK_TREE "--work-tree=" +#define CMD_DEBUGFS_DIR "--debugfs-dir=" + #define PERF_DIR_ENVIRONMENT "PERF_DIR" #define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE" #define DEFAULT_PERF_DIR_ENVIRONMENT ".perf" -- cgit v1.2.3 From f4f0b418188cc7995375acbb54e87c80f21861bd Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Tue, 13 Oct 2009 14:57:20 +0200 Subject: perf tools: Remove expensive old debug code from perf top Calling gettimeofday() at high frequency is painful for handicapped boxen. The spot calling gettimeofday() is old unneeded debug code, so remove it. Reported-by: Ingo Molnar Signed-off-by: Mike Galbraith Cc: Peter Zijlstra Cc: Peter Zijlstra LKML-Reference: <1255438640.7173.1.camel@marge.simson.net> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index c0f69e80b2cc..2d8806bac258 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -870,8 +870,6 @@ static unsigned int mmap_read_head(struct mmap_data *md) return head; } -struct timeval last_read, this_read; - static void mmap_read_counter(struct mmap_data *md) { unsigned int head = mmap_read_head(md); @@ -879,8 +877,6 @@ static void mmap_read_counter(struct mmap_data *md) unsigned char *data = md->base + page_size; int diff; - gettimeofday(&this_read, NULL); - /* * If we're further behind than half the buffer, there's a chance * the writer will bite our tail and mess up the samples under us. 
@@ -891,14 +887,7 @@ static void mmap_read_counter(struct mmap_data *md) */ diff = head - old; if (diff > md->mask / 2 || diff < 0) { - struct timeval iv; - unsigned long msecs; - - timersub(&this_read, &last_read, &iv); - msecs = iv.tv_sec*1000 + iv.tv_usec/1000; - - fprintf(stderr, "WARNING: failed to keep up with mmap data." - " Last read %lu msecs ago.\n", msecs); + fprintf(stderr, "WARNING: failed to keep up with mmap data.\n"); /* * head points to a known good entry, start there. @@ -906,8 +895,6 @@ static void mmap_read_counter(struct mmap_data *md) old = head; } - last_read = this_read; - for (; old != head;) { event_t *event = (event_t *)&data[old & md->mask]; -- cgit v1.2.3 From d5b889f2ecec7849e851ddd31c34bdfb3482b5de Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 13 Oct 2009 11:16:29 -0300 Subject: perf tools: Move threads & last_match to threads.c This was just being copy'n'pasted all over. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <20091013141629.GD21809@ghostprotocols.net> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 25 +++++++------------------ tools/perf/builtin-report.c | 26 +++++++------------------- tools/perf/builtin-sched.c | 30 +++++++++++------------------- tools/perf/builtin-trace.c | 13 +++---------- tools/perf/util/thread.c | 27 ++++++++++++++------------- tools/perf/util/thread.h | 8 +++----- 6 files changed, 45 insertions(+), 84 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 8c84320ecb06..3fe0de03004d 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -37,10 +37,6 @@ static int print_line; static unsigned long page_size; static unsigned long mmap_window = 32; -static struct rb_root threads; -static struct thread *last_match; - - struct sym_ext { struct rb_node node; double percent; @@ -96,12 +92,10 @@ static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; - struct thread *thread; u64 ip = event->ip.ip; struct map *map = NULL; struct symbol *sym = NULL; - - thread = threads__findnew(event->ip.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->ip.pid); dump_printf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", (void *)(offset + head), @@ -166,10 +160,8 @@ got_map: static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; struct map *map = map__new(&event->mmap, NULL, 0); - - thread = threads__findnew(event->mmap.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n", (void *)(offset + head), @@ -194,9 +186,8 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head) static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; + struct thread *thread = threads__findnew(event->comm.pid); - thread = threads__findnew(event->comm.pid, &threads, &last_match); dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", (void *)(offset + head), (void *)(long)(event->header.size), @@ -215,11 +206,9 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) static int process_fork_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - struct thread *parent; + struct thread *thread = threads__findnew(event->fork.pid); + 
struct thread *parent = threads__findnew(event->fork.ppid); - thread = threads__findnew(event->fork.pid, &threads, &last_match); - parent = threads__findnew(event->fork.ppid, &threads, &last_match); dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n", (void *)(offset + head), (void *)(long)(event->header.size), @@ -558,7 +547,7 @@ static int __cmd_annotate(void) uint32_t size; char *buf; - register_idle_thread(&threads, &last_match); + register_idle_thread(); input = open(input_name, O_RDONLY); if (input < 0) { @@ -659,7 +648,7 @@ more: return 0; if (verbose > 3) - threads__fprintf(stdout, &threads); + threads__fprintf(stdout); if (verbose > 2) dsos__fprintf(stdout); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index f57a23b19f3c..015c79745966 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -55,9 +55,6 @@ static char callchain_default_opt[] = "fractal,0.5"; static char *cwd; static int cwdlen; -static struct rb_root threads; -static struct thread *last_match; - static struct perf_header *header; static u64 sample_type; @@ -593,15 +590,13 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; struct symbol *sym = NULL; - struct thread *thread; u64 ip = event->ip.ip; u64 period = 1; struct map *map = NULL; void *more_data = event->ip.__more_data; struct ip_callchain *chain = NULL; int cpumode; - - thread = threads__findnew(event->ip.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->ip.pid); if (sample_type & PERF_SAMPLE_PERIOD) { period = *(u64 *)more_data; @@ -685,10 +680,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; struct map *map = map__new(&event->mmap, cwd, cwdlen); - - thread = threads__findnew(event->mmap.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", (void *)(offset + head), @@ -714,9 +707,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head) static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - - thread = threads__findnew(event->comm.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->comm.pid); dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", (void *)(offset + head), @@ -736,11 +727,8 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) static int process_task_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - struct thread *parent; - - thread = threads__findnew(event->fork.pid, &threads, &last_match); - parent = threads__findnew(event->fork.ppid, &threads, &last_match); + struct thread *thread = threads__findnew(event->fork.pid); + struct thread *parent = threads__findnew(event->fork.ppid); dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n", (void *)(offset + head), @@ -857,7 +845,7 @@ static int __cmd_report(void) struct thread *idle; int ret; - idle = register_idle_thread(&threads, &last_match); + idle = register_idle_thread(); thread__comm_adjust(idle); if (show_threads) @@ -881,7 +869,7 @@ static int __cmd_report(void) return 0; if (verbose > 3) - threads__fprintf(stdout, &threads); + threads__fprintf(stdout); if (verbose > 2) dsos__fprintf(stdout); diff --git a/tools/perf/builtin-sched.c 
b/tools/perf/builtin-sched.c index 387a44234368..73bdad029730 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -24,9 +24,6 @@ static char const *input_name = "perf.data"; static unsigned long total_comm = 0; -static struct rb_root threads; -static struct thread *last_match; - static struct perf_header *header; static u64 sample_type; @@ -641,9 +638,7 @@ static void test_calibrations(void) static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - - thread = threads__findnew(event->comm.tid, &threads, &last_match); + struct thread *thread = threads__findnew(event->comm.tid); dump_printf("%p [%p]: perf_event_comm: %s:%d\n", (void *)(offset + head), @@ -1086,8 +1081,8 @@ latency_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %Ld < 0 ?\n", delta); - sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); - sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); + sched_out = threads__findnew(switch_event->prev_pid); + sched_in = threads__findnew(switch_event->next_pid); out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); if (!out_events) { @@ -1120,13 +1115,10 @@ latency_runtime_event(struct trace_runtime_event *runtime_event, u64 timestamp, struct thread *this_thread __used) { - struct work_atoms *atoms; - struct thread *thread; + struct thread *thread = threads__findnew(runtime_event->pid); + struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); BUG_ON(cpu >= MAX_CPUS || cpu < 0); - - thread = threads__findnew(runtime_event->pid, &threads, &last_match); - atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); if (!atoms) { thread_atoms_insert(thread); atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); @@ -1153,7 +1145,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, if (!wakeup_event->success) return; - wakee = threads__findnew(wakeup_event->pid, &threads, &last_match); + wakee = threads__findnew(wakeup_event->pid); atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); if (!atoms) { thread_atoms_insert(wakee); @@ -1202,7 +1194,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, if (profile_cpu == -1) return; - migrant = threads__findnew(migrate_task_event->pid, &threads, &last_match); + migrant = threads__findnew(migrate_task_event->pid); atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); if (!atoms) { thread_atoms_insert(migrant); @@ -1458,8 +1450,8 @@ map_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %Ld < 0 ?\n", delta); - sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); - sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); + sched_out = threads__findnew(switch_event->prev_pid); + sched_in = threads__findnew(switch_event->next_pid); curr_thread[this_cpu] = sched_in; @@ -1649,7 +1641,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) if (!(sample_type & PERF_SAMPLE_RAW)) return 0; - thread = threads__findnew(event->ip.pid, &threads, &last_match); + thread = threads__findnew(event->ip.pid); if (sample_type & PERF_SAMPLE_TIME) { timestamp = *(u64 *)more_data; @@ -1725,7 +1717,7 @@ static struct perf_file_handler file_handler = { static int read_events(void) { - register_idle_thread(&threads, &last_match); + register_idle_thread(); register_perf_file_handler(&file_handler); return mmap_dispatch_perf_file(&header, input_name, 
0, 0, &cwdlen, &cwd); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index fb3f3c220211..ccf867dbab5c 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -19,9 +19,6 @@ static char const *input_name = "perf.data"; static unsigned long total = 0; static unsigned long total_comm = 0; -static struct rb_root threads; -static struct thread *last_match; - static struct perf_header *header; static u64 sample_type; @@ -32,9 +29,7 @@ static int cwdlen; static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - - thread = threads__findnew(event->comm.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->comm.pid); dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", (void *)(offset + head), @@ -54,14 +49,12 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; u64 ip = event->ip.ip; u64 timestamp = -1; u32 cpu = -1; u64 period = 1; void *more_data = event->ip.__more_data; - - thread = threads__findnew(event->ip.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->ip.pid); if (sample_type & PERF_SAMPLE_TIME) { timestamp = *(u64 *)more_data; @@ -135,7 +128,7 @@ static struct perf_file_handler file_handler = { static int __cmd_trace(void) { - register_idle_thread(&threads, &last_match); + register_idle_thread(); register_perf_file_handler(&file_handler); return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd); diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 3b56aebb1f4b..f53fad7c0a8d 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -6,6 +6,9 @@ #include "util.h" #include "debug.h" +static struct rb_root threads; +static struct thread *last_match; + static struct thread *thread__new(pid_t pid) { struct thread *self = calloc(1, sizeof(*self)); @@ -50,10 +53,9 @@ static size_t thread__fprintf(struct thread *self, FILE *fp) return ret; } -struct thread * -threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) +struct thread *threads__findnew(pid_t pid) { - struct rb_node **p = &threads->rb_node; + struct rb_node **p = &threads.rb_node; struct rb_node *parent = NULL; struct thread *th; @@ -62,15 +64,15 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) * so most of the time we dont have to look up * the full rbtree: */ - if (*last_match && (*last_match)->pid == pid) - return *last_match; + if (last_match && last_match->pid == pid) + return last_match; while (*p != NULL) { parent = *p; th = rb_entry(parent, struct thread, rb_node); if (th->pid == pid) { - *last_match = th; + last_match = th; return th; } @@ -83,17 +85,16 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) th = thread__new(pid); if (th != NULL) { rb_link_node(&th->rb_node, parent, p); - rb_insert_color(&th->rb_node, threads); - *last_match = th; + rb_insert_color(&th->rb_node, &threads); + last_match = th; } return th; } -struct thread * -register_idle_thread(struct rb_root *threads, struct thread **last_match) +struct thread *register_idle_thread(void) { - struct thread *thread = threads__findnew(0, threads, last_match); + struct thread *thread = threads__findnew(0); if (!thread || thread__set_comm(thread, "swapper")) { fprintf(stderr, "problem inserting idle task.\n"); @@ -197,12 +198,12 @@ int 
thread__fork(struct thread *self, struct thread *parent) return 0; } -size_t threads__fprintf(FILE *fp, struct rb_root *threads) +size_t threads__fprintf(FILE *fp) { size_t ret = 0; struct rb_node *nd; - for (nd = rb_first(threads); nd; nd = rb_next(nd)) { + for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { struct thread *pos = rb_entry(nd, struct thread, rb_node); ret += thread__fprintf(pos, fp); diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 845d9b62f96f..1abef3b7455d 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -15,13 +15,11 @@ struct thread { }; int thread__set_comm(struct thread *self, const char *comm); -struct thread * -threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match); -struct thread * -register_idle_thread(struct rb_root *threads, struct thread **last_match); +struct thread *threads__findnew(pid_t pid); +struct thread *register_idle_thread(void); void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); -size_t threads__fprintf(FILE *fp, struct rb_root *threads); +size_t threads__fprintf(FILE *fp); void maps__insert(struct rb_root *maps, struct map *map); struct map *maps__find(struct rb_root *maps, u64 ip); -- cgit v1.2.3 From 194ec34184869f0de1cf255c924fc5299e1b3d27 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 13 Oct 2009 16:33:50 -0400 Subject: function-graph/x86: Replace unbalanced ret with jmp The function graph tracer replaces the return address with a hook to trace the exit of the function call. This hook finishes by returning to the real location the function should return to. But the current implementation uses a ret to jump to the real return location. This causes an imbalance between calls and rets. That is, the original function does a call, the ret goes to the handler, and then the handler does a ret without a matching call. The function graph tracer already disturbs the branch predictor by replacing the original ret; adding a second, unmatched ret breaks the predictor even more. This patch replaces the ret with a jmp to keep calls and rets balanced. I tested this on one box and it showed a 1.7% increase in performance. Another box only showed a small 0.3% increase. But no box that I tested this on showed a decrease in performance by making this change. 
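Condensed from the 32-bit hunk in the diff below (this is only a sketch of the idea, not the full register save/restore sequence):

  # before: write the real return address into a spare stack slot,
  # then consume it with a ret that has no matching call
  movl %eax, 0xc(%esp)
  ...
  ret

  # after: keep the return address in a scratch register and jump
  # through it, so the traced function's call/ret pairing stays intact
  movl %eax, %ecx
  ...
  jmp *%ecx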
Signed-off-by: Steven Rostedt Acked-by: Mathieu Desnoyers Cc: Frederic Weisbecker LKML-Reference: <20091013203425.042034383@goodmis.org> Signed-off-by: Ingo Molnar --- arch/x86/kernel/entry_32.S | 7 ++----- arch/x86/kernel/entry_64.S | 6 +++--- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index c097e7d607c6..7d52e9da5e0c 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1185,17 +1185,14 @@ END(ftrace_graph_caller) .globl return_to_handler return_to_handler: - pushl $0 pushl %eax - pushl %ecx pushl %edx movl %ebp, %eax call ftrace_return_to_handler - movl %eax, 0xc(%esp) + movl %eax, %ecx popl %edx - popl %ecx popl %eax - ret + jmp *%ecx #endif .section .rodata,"a" diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index b5c061f8f358..bd5bbddddf91 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -155,11 +155,11 @@ GLOBAL(return_to_handler) call ftrace_return_to_handler - movq %rax, 16(%rsp) + movq %rax, %rdi movq 8(%rsp), %rdx movq (%rsp), %rax - addq $16, %rsp - retq + addq $24, %rsp + jmp *%rdi #endif -- cgit v1.2.3 From 756d17ee7ee4fbc8238bdf97100af63e6ac441ef Mon Sep 17 00:00:00 2001 From: "jolsa@redhat.com" Date: Tue, 13 Oct 2009 16:33:52 -0400 Subject: tracing: Support multiple pids in set_pid_ftrace file Adding the possibility to set more than 1 pid in the set_pid_ftrace file, thus allowing to trace more than 1 independent processes. Usage: sh-4.0# echo 284 > ./set_ftrace_pid sh-4.0# cat ./set_ftrace_pid 284 sh-4.0# echo 1 >> ./set_ftrace_pid sh-4.0# echo 0 >> ./set_ftrace_pid sh-4.0# cat ./set_ftrace_pid swapper tasks 1 284 sh-4.0# echo 4 > ./set_ftrace_pid sh-4.0# cat ./set_ftrace_pid 4 sh-4.0# echo > ./set_ftrace_pid sh-4.0# cat ./set_ftrace_pid no pid sh-4.0# Signed-off-by: Jiri Olsa Cc: Frederic Weisbecker LKML-Reference: <20091013203425.565454612@goodmis.org> Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/ftrace.c | 234 +++++++++++++++++++++++++++++++++++--------------- kernel/trace/trace.h | 4 +- 2 files changed, 167 insertions(+), 71 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 45c965919cff..0c799d1af702 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -60,6 +60,13 @@ static int last_ftrace_enabled; /* Quick disabling of function tracer. */ int function_trace_stop; +/* List for set_ftrace_pid's pids. */ +LIST_HEAD(ftrace_pids); +struct ftrace_pid { + struct list_head list; + struct pid *pid; +}; + /* * ftrace_disabled is set when an anomaly is discovered. * ftrace_disabled is much stronger than ftrace_enabled. 
@@ -159,7 +166,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops) else func = ftrace_list_func; - if (ftrace_pid_trace) { + if (!list_empty(&ftrace_pids)) { set_ftrace_pid_function(func); func = ftrace_pid_func; } @@ -207,7 +214,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) if (ftrace_list->next == &ftrace_list_end) { ftrace_func_t func = ftrace_list->func; - if (ftrace_pid_trace) { + if (!list_empty(&ftrace_pids)) { set_ftrace_pid_function(func); func = ftrace_pid_func; } @@ -235,7 +242,7 @@ static void ftrace_update_pid_func(void) func = __ftrace_trace_function; #endif - if (ftrace_pid_trace) { + if (!list_empty(&ftrace_pids)) { set_ftrace_pid_function(func); func = ftrace_pid_func; } else { @@ -825,8 +832,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) } #endif /* CONFIG_FUNCTION_PROFILER */ -/* set when tracing only a pid */ -struct pid *ftrace_pid_trace; static struct pid * const ftrace_swapper_pid = &init_struct_pid; #ifdef CONFIG_DYNAMIC_FTRACE @@ -2758,23 +2763,6 @@ static inline void ftrace_startup_enable(int command) { } # define ftrace_shutdown_sysctl() do { } while (0) #endif /* CONFIG_DYNAMIC_FTRACE */ -static ssize_t -ftrace_pid_read(struct file *file, char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - char buf[64]; - int r; - - if (ftrace_pid_trace == ftrace_swapper_pid) - r = sprintf(buf, "swapper tasks\n"); - else if (ftrace_pid_trace) - r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace)); - else - r = sprintf(buf, "no pid\n"); - - return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); -} - static void clear_ftrace_swapper(void) { struct task_struct *p; @@ -2825,14 +2813,12 @@ static void set_ftrace_pid(struct pid *pid) rcu_read_unlock(); } -static void clear_ftrace_pid_task(struct pid **pid) +static void clear_ftrace_pid_task(struct pid *pid) { - if (*pid == ftrace_swapper_pid) + if (pid == ftrace_swapper_pid) clear_ftrace_swapper(); else - clear_ftrace_pid(*pid); - - *pid = NULL; + clear_ftrace_pid(pid); } static void set_ftrace_pid_task(struct pid *pid) @@ -2843,11 +2829,140 @@ static void set_ftrace_pid_task(struct pid *pid) set_ftrace_pid(pid); } +static int ftrace_pid_add(int p) +{ + struct pid *pid; + struct ftrace_pid *fpid; + int ret = -EINVAL; + + mutex_lock(&ftrace_lock); + + if (!p) + pid = ftrace_swapper_pid; + else + pid = find_get_pid(p); + + if (!pid) + goto out; + + ret = 0; + + list_for_each_entry(fpid, &ftrace_pids, list) + if (fpid->pid == pid) + goto out_put; + + ret = -ENOMEM; + + fpid = kmalloc(sizeof(*fpid), GFP_KERNEL); + if (!fpid) + goto out_put; + + list_add(&fpid->list, &ftrace_pids); + fpid->pid = pid; + + set_ftrace_pid_task(pid); + + ftrace_update_pid_func(); + ftrace_startup_enable(0); + + mutex_unlock(&ftrace_lock); + return 0; + +out_put: + if (pid != ftrace_swapper_pid) + put_pid(pid); + +out: + mutex_unlock(&ftrace_lock); + return ret; +} + +static void ftrace_pid_reset(void) +{ + struct ftrace_pid *fpid, *safe; + + mutex_lock(&ftrace_lock); + list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) { + struct pid *pid = fpid->pid; + + clear_ftrace_pid_task(pid); + + list_del(&fpid->list); + kfree(fpid); + } + + ftrace_update_pid_func(); + ftrace_startup_enable(0); + + mutex_unlock(&ftrace_lock); +} + +static void *fpid_start(struct seq_file *m, loff_t *pos) +{ + mutex_lock(&ftrace_lock); + + if (list_empty(&ftrace_pids) && (!*pos)) + return (void *) 1; + + return seq_list_start(&ftrace_pids, *pos); +} + +static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) +{ 
+ if (v == (void *)1) + return NULL; + + return seq_list_next(v, &ftrace_pids, pos); +} + +static void fpid_stop(struct seq_file *m, void *p) +{ + mutex_unlock(&ftrace_lock); +} + +static int fpid_show(struct seq_file *m, void *v) +{ + const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); + + if (v == (void *)1) { + seq_printf(m, "no pid\n"); + return 0; + } + + if (fpid->pid == ftrace_swapper_pid) + seq_printf(m, "swapper tasks\n"); + else + seq_printf(m, "%u\n", pid_vnr(fpid->pid)); + + return 0; +} + +static const struct seq_operations ftrace_pid_sops = { + .start = fpid_start, + .next = fpid_next, + .stop = fpid_stop, + .show = fpid_show, +}; + +static int +ftrace_pid_open(struct inode *inode, struct file *file) +{ + int ret = 0; + + if ((file->f_mode & FMODE_WRITE) && + (file->f_flags & O_TRUNC)) + ftrace_pid_reset(); + + if (file->f_mode & FMODE_READ) + ret = seq_open(file, &ftrace_pid_sops); + + return ret; +} + static ssize_t ftrace_pid_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { - struct pid *pid; char buf[64]; long val; int ret; @@ -2860,57 +2975,38 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf, buf[cnt] = 0; + /* + * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid" + * to clean the filter quietly. + */ + strstrip(buf); + if (strlen(buf) == 0) + return 1; + ret = strict_strtol(buf, 10, &val); if (ret < 0) return ret; - mutex_lock(&ftrace_lock); - if (val < 0) { - /* disable pid tracing */ - if (!ftrace_pid_trace) - goto out; - - clear_ftrace_pid_task(&ftrace_pid_trace); - - } else { - /* swapper task is special */ - if (!val) { - pid = ftrace_swapper_pid; - if (pid == ftrace_pid_trace) - goto out; - } else { - pid = find_get_pid(val); - - if (pid == ftrace_pid_trace) { - put_pid(pid); - goto out; - } - } - - if (ftrace_pid_trace) - clear_ftrace_pid_task(&ftrace_pid_trace); - - if (!pid) - goto out; - - ftrace_pid_trace = pid; - - set_ftrace_pid_task(ftrace_pid_trace); - } + ret = ftrace_pid_add(val); - /* update the function call */ - ftrace_update_pid_func(); - ftrace_startup_enable(0); + return ret ? 
ret : cnt; +} - out: - mutex_unlock(&ftrace_lock); +static int +ftrace_pid_release(struct inode *inode, struct file *file) +{ + if (file->f_mode & FMODE_READ) + seq_release(inode, file); - return cnt; + return 0; } static const struct file_operations ftrace_pid_fops = { - .read = ftrace_pid_read, - .write = ftrace_pid_write, + .open = ftrace_pid_open, + .write = ftrace_pid_write, + .read = seq_read, + .llseek = seq_lseek, + .release = ftrace_pid_release, }; static __init int ftrace_init_debugfs(void) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index f22a7ac32380..acef8b4636f0 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -496,12 +496,12 @@ print_graph_function(struct trace_iterator *iter) } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -extern struct pid *ftrace_pid_trace; +extern struct list_head ftrace_pids; #ifdef CONFIG_FUNCTION_TRACER static inline int ftrace_trace_task(struct task_struct *task) { - if (!ftrace_pid_trace) + if (list_empty(&ftrace_pids)) return 1; return test_tsk_trace_trace(task); -- cgit v1.2.3 From 5cb084bb1f3fd4dcdaf7e4cf564994346ec8f783 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 13 Oct 2009 16:33:53 -0400 Subject: tracing: Enable records during the module load I was debuging some module using "function" and "function_graph" tracers and noticed, that if you load module after you enabled tracing, the module's hooks will convert only to NOP instructions. The attached patch enables modules' hooks if there's function trace allready on, thus allowing to trace module functions. Signed-off-by: Jiri Olsa Cc: Frederic Weisbecker Signed-off-by: Steven Rostedt LKML-Reference: <20091013203425.896285120@goodmis.org> Signed-off-by: Ingo Molnar --- kernel/trace/ftrace.c | 38 ++++++++++++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0c799d1af702..aaea9cda8781 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1270,12 +1270,34 @@ static int ftrace_update_code(struct module *mod) ftrace_new_addrs = p->newlist; p->flags = 0L; - /* convert record (i.e, patch mcount-call with NOP) */ - if (ftrace_code_disable(mod, p)) { - p->flags |= FTRACE_FL_CONVERTED; - ftrace_update_cnt++; - } else + /* + * Do the initial record convertion from mcount jump + * to the NOP instructions. + */ + if (!ftrace_code_disable(mod, p)) { ftrace_free_rec(p); + continue; + } + + p->flags |= FTRACE_FL_CONVERTED; + ftrace_update_cnt++; + + /* + * If the tracing is enabled, go ahead and enable the record. + * + * The reason not to enable the record immediatelly is the + * inherent check of ftrace_make_nop/ftrace_make_call for + * correct previous instructions. Making first the NOP + * conversion puts the module to the correct state, thus + * passing the ftrace_make_call check. 
+ */ + if (ftrace_start_up) { + int failed = __ftrace_replace_code(p, 1); + if (failed) { + ftrace_bug(failed, p->ip); + ftrace_free_rec(p); + } + } } stop = ftrace_now(raw_smp_processor_id()); @@ -2609,7 +2631,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) return 0; } -static int ftrace_convert_nops(struct module *mod, +static int ftrace_process_locs(struct module *mod, unsigned long *start, unsigned long *end) { @@ -2669,7 +2691,7 @@ static void ftrace_init_module(struct module *mod, { if (ftrace_disabled || start == end) return; - ftrace_convert_nops(mod, start, end); + ftrace_process_locs(mod, start, end); } static int ftrace_module_notify(struct notifier_block *self, @@ -2730,7 +2752,7 @@ void __init ftrace_init(void) last_ftrace_enabled = ftrace_enabled = 1; - ret = ftrace_convert_nops(NULL, + ret = ftrace_process_locs(NULL, __start_mcount_loc, __stop_mcount_loc); -- cgit v1.2.3 From 4d8289494a37e19cd7f3beacea9c957ad3debad6 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 13 Oct 2009 16:33:54 -0400 Subject: tracing: Enable "__cold" functions Based on the commit: a586df06 "x86: Support __attribute__((__cold__)) in gcc 4.3" some of the functions goes to the ".text.unlikely" section. Looks like there's not many of them (I found printk, panic, __ssb_dma_not_implemented, fat_fs_error), but still worth to include I think. Signed-off-by: Jiri Olsa Cc: Frederic Weisbecker Signed-off-by: Steven Rostedt LKML-Reference: <20091013203426.175845614@goodmis.org> Signed-off-by: Ingo Molnar --- scripts/recordmcount.pl | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 090d300d7394..bfb8b2cdd92a 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -119,6 +119,7 @@ my %text_sections = ( ".sched.text" => 1, ".spinlock.text" => 1, ".irqentry.text" => 1, + ".text.unlikely" => 1, ); $objdump = "objdump" if ((length $objdump) == 0); -- cgit v1.2.3 From 03541f8b69c058162e4cf9675ec9181e6a204d55 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 14 Oct 2009 16:58:03 +1100 Subject: perf_event: Adjust frequency and unthrottle for non-group-leader events The loop in perf_ctx_adjust_freq checks the frequency of sampling event counters, and adjusts the event interval and unthrottles the event if required, and resets the interrupt count for the event. However, at present it only looks at group leaders. This means that a sampling event that is not a group leader will eventually get throttled, once its interrupt count reaches sysctl_perf_event_sample_rate/HZ --- and that is guaranteed to happen, if the event is active for long enough, since the interrupt count never gets reset. Once it is throttled it never gets unthrottled, so it basically just stops working at that point. This fixes it by making perf_ctx_adjust_freq use ctx->event_list rather than ctx->group_list. The existing spin_lock/spin_unlock around the loop makes it unnecessary to put rcu_read_lock/ rcu_read_unlock around the list_for_each_entry_rcu(). Reported-by: Mark W. 
Krentel Signed-off-by: Paul Mackerras Cc: Corey Ashford Cc: Peter Zijlstra LKML-Reference: <19157.26731.855609.165622@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 9d0b5c665883..afb7ef3dbc44 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1355,7 +1355,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) u64 interrupts, freq; spin_lock(&ctx->lock); - list_for_each_entry(event, &ctx->group_list, group_entry) { + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (event->state != PERF_EVENT_STATE_ACTIVE) continue; -- cgit v1.2.3 From c44fc770845163f8d9e573f37f92a7b7a7ade14e Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 19 Sep 2009 06:50:42 +0200 Subject: tracing: Move syscalls metadata handling from arch to core Most of the syscalls metadata processing is done from arch. But these operations are mostly generic accross archs. Especially now that we have a common variable name that expresses the number of syscalls supported by an arch: NR_syscalls, the only remaining bits that need to reside in arch is the syscall nr to addr translation. v2: Compare syscalls symbols only after the "sys" prefix so that we avoid spurious mismatches with archs that have syscalls wrappers, in which case syscalls symbols have "SyS" prefixed aliases. (Reported by: Heiko Carstens) Signed-off-by: Frederic Weisbecker Acked-by: Heiko Carstens Cc: Ingo Molnar Cc: Steven Rostedt Cc: Li Zefan Cc: Masami Hiramatsu Cc: Jason Baron Cc: Lai Jiangshan Cc: Martin Schwidefsky Cc: Paul Mundt --- arch/s390/kernel/ftrace.c | 67 +-------------------------------- arch/x86/kernel/ftrace.c | 76 +------------------------------------- include/trace/syscall.h | 2 +- kernel/trace/trace_syscalls.c | 86 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 91 insertions(+), 140 deletions(-) diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 57bdcb1e3cdf..7c5752c3423d 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -206,73 +206,10 @@ out: #ifdef CONFIG_FTRACE_SYSCALLS -extern unsigned long __start_syscalls_metadata[]; -extern unsigned long __stop_syscalls_metadata[]; extern unsigned int sys_call_table[]; -static struct syscall_metadata **syscalls_metadata; - -struct syscall_metadata *syscall_nr_to_meta(int nr) -{ - if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) - return NULL; - - return syscalls_metadata[nr]; -} - -int syscall_name_to_nr(char *name) -{ - int i; - - if (!syscalls_metadata) - return -1; - for (i = 0; i < NR_syscalls; i++) - if (syscalls_metadata[i]) - if (!strcmp(syscalls_metadata[i]->name, name)) - return i; - return -1; -} - -void set_syscall_enter_id(int num, int id) -{ - syscalls_metadata[num]->enter_id = id; -} - -void set_syscall_exit_id(int num, int id) +unsigned long __init arch_syscall_addr(int nr) { - syscalls_metadata[num]->exit_id = id; -} - -static struct syscall_metadata *find_syscall_meta(unsigned long syscall) -{ - struct syscall_metadata *start; - struct syscall_metadata *stop; - char str[KSYM_SYMBOL_LEN]; - - start = (struct syscall_metadata *)__start_syscalls_metadata; - stop = (struct syscall_metadata *)__stop_syscalls_metadata; - kallsyms_lookup(syscall, NULL, NULL, NULL, str); - - for ( ; start < stop; start++) { - if (start->name && !strcmp(start->name + 3, str + 3)) - return start; - } - return NULL; -} - -static int __init 
arch_init_ftrace_syscalls(void) -{ - struct syscall_metadata *meta; - int i; - syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, - GFP_KERNEL); - if (!syscalls_metadata) - return -ENOMEM; - for (i = 0; i < NR_syscalls; i++) { - meta = find_syscall_meta((unsigned long)sys_call_table[i]); - syscalls_metadata[i] = meta; - } - return 0; + return (unsigned long)sys_call_table[nr]; } -arch_initcall(arch_init_ftrace_syscalls); #endif diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 25e6f5fc4b1e..5a1b9758fd62 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -470,82 +470,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, #ifdef CONFIG_FTRACE_SYSCALLS -extern unsigned long __start_syscalls_metadata[]; -extern unsigned long __stop_syscalls_metadata[]; extern unsigned long *sys_call_table; -static struct syscall_metadata **syscalls_metadata; - -static struct syscall_metadata *find_syscall_meta(unsigned long *syscall) -{ - struct syscall_metadata *start; - struct syscall_metadata *stop; - char str[KSYM_SYMBOL_LEN]; - - - start = (struct syscall_metadata *)__start_syscalls_metadata; - stop = (struct syscall_metadata *)__stop_syscalls_metadata; - kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str); - - for ( ; start < stop; start++) { - if (start->name && !strcmp(start->name, str)) - return start; - } - return NULL; -} - -struct syscall_metadata *syscall_nr_to_meta(int nr) -{ - if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) - return NULL; - - return syscalls_metadata[nr]; -} - -int syscall_name_to_nr(char *name) -{ - int i; - - if (!syscalls_metadata) - return -1; - - for (i = 0; i < NR_syscalls; i++) { - if (syscalls_metadata[i]) { - if (!strcmp(syscalls_metadata[i]->name, name)) - return i; - } - } - return -1; -} - -void set_syscall_enter_id(int num, int id) -{ - syscalls_metadata[num]->enter_id = id; -} - -void set_syscall_exit_id(int num, int id) +unsigned long __init arch_syscall_addr(int nr) { - syscalls_metadata[num]->exit_id = id; -} - -static int __init arch_init_ftrace_syscalls(void) -{ - int i; - struct syscall_metadata *meta; - unsigned long **psys_syscall_table = &sys_call_table; - - syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * - NR_syscalls, GFP_KERNEL); - if (!syscalls_metadata) { - WARN_ON(1); - return -ENOMEM; - } - - for (i = 0; i < NR_syscalls; i++) { - meta = find_syscall_meta(psys_syscall_table[i]); - syscalls_metadata[i] = meta; - } - return 0; + return (unsigned long)(&sys_call_table)[nr]; } -arch_initcall(arch_init_ftrace_syscalls); #endif diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 5dc283ba5ae0..e972f0a40f8d 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -33,7 +33,7 @@ struct syscall_metadata { }; #ifdef CONFIG_FTRACE_SYSCALLS -extern struct syscall_metadata *syscall_nr_to_meta(int nr); +extern unsigned long arch_syscall_addr(int nr); extern int syscall_name_to_nr(char *name); void set_syscall_enter_id(int num, int id); void set_syscall_exit_id(int num, int id); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 9fbce6c9d2e1..8bda4bff2286 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -14,6 +14,69 @@ static int sys_refcount_exit; static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); +extern unsigned long __start_syscalls_metadata[]; +extern unsigned long 
__stop_syscalls_metadata[]; + +static struct syscall_metadata **syscalls_metadata; + +static struct syscall_metadata *find_syscall_meta(unsigned long syscall) +{ + struct syscall_metadata *start; + struct syscall_metadata *stop; + char str[KSYM_SYMBOL_LEN]; + + + start = (struct syscall_metadata *)__start_syscalls_metadata; + stop = (struct syscall_metadata *)__stop_syscalls_metadata; + kallsyms_lookup(syscall, NULL, NULL, NULL, str); + + for ( ; start < stop; start++) { + /* + * Only compare after the "sys" prefix. Archs that use + * syscall wrappers may have syscalls symbols aliases prefixed + * with "SyS" instead of "sys", leading to an unwanted + * mismatch. + */ + if (start->name && !strcmp(start->name + 3, str + 3)) + return start; + } + return NULL; +} + +static struct syscall_metadata *syscall_nr_to_meta(int nr) +{ + if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) + return NULL; + + return syscalls_metadata[nr]; +} + +int syscall_name_to_nr(char *name) +{ + int i; + + if (!syscalls_metadata) + return -1; + + for (i = 0; i < NR_syscalls; i++) { + if (syscalls_metadata[i]) { + if (!strcmp(syscalls_metadata[i]->name, name)) + return i; + } + } + return -1; +} + +void set_syscall_enter_id(int num, int id) +{ + syscalls_metadata[num]->enter_id = id; +} + +void set_syscall_exit_id(int num, int id) +{ + syscalls_metadata[num]->exit_id = id; +} + enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags) { @@ -375,6 +438,29 @@ struct trace_event event_syscall_exit = { .trace = print_syscall_exit, }; +int __init init_ftrace_syscalls(void) +{ + struct syscall_metadata *meta; + unsigned long addr; + int i; + + syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * + NR_syscalls, GFP_KERNEL); + if (!syscalls_metadata) { + WARN_ON(1); + return -ENOMEM; + } + + for (i = 0; i < NR_syscalls; i++) { + addr = arch_syscall_addr(i); + meta = find_syscall_meta(addr); + syscalls_metadata[i] = meta; + } + + return 0; +} +core_initcall(init_ftrace_syscalls); + #ifdef CONFIG_EVENT_PROFILE static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); -- cgit v1.2.3 From 459c6d15a0c52bae43842ff2cd0dd41aa7de9b7f Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 19 Sep 2009 07:14:15 +0200 Subject: tracing: Document HAVE_SYSCALL_TRACEPOINTS needs Document the arch needed requirements to get the support for syscalls tracing. v2: HAVE_FTRACE_SYSCALLS have been changed to HAVE_SYSCALL_TRACEPOINTS recently. Update this config name in the documentation then. Signed-off-by: Frederic Weisbecker Acked-by: Heiko Carstens Cc: Ingo Molnar Cc: Steven Rostedt Cc: Li Zefan Cc: Masami Hiramatsu Cc: Jason Baron Cc: Lai Jiangshan Cc: Martin Schwidefsky Cc: Paul Mundt --- Documentation/trace/ftrace-design.txt | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/Documentation/trace/ftrace-design.txt b/Documentation/trace/ftrace-design.txt index 7003e10f10f5..641a1ef2a7ff 100644 --- a/Documentation/trace/ftrace-design.txt +++ b/Documentation/trace/ftrace-design.txt @@ -213,10 +213,19 @@ If you can't trace NMI functions, then skip this option.
-HAVE_FTRACE_SYSCALLS +HAVE_SYSCALL_TRACEPOINTS --------------------- -
+You need very few things to get the syscalls tracing in an arch. + +- Have a NR_syscalls variable in <asm/unistd.h> that provides the number + of syscalls supported by the arch. +- Implement arch_syscall_addr() that resolves a syscall address from a + syscall number. +- Support the TIF_SYSCALL_TRACEPOINT thread flags +- Put the trace_sys_enter() and trace_sys_exit() tracepoints calls from ptrace + in the ptrace syscalls tracing path. +- Tag this arch as HAVE_SYSCALL_TRACEPOINTS. HAVE_FTRACE_MCOUNT_RECORD -- cgit v1.2.3 From 06f43d66ec36388056f5c697bf1e67c0e0a1645c Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 14 Oct 2009 20:43:39 +0200 Subject: ftrace: Copy ftrace_graph_filter boot param using strlcpy We are using strncpy in the wrong way to copy the ftrace_graph_filter boot param: we pass the buffer size instead of the max string size it can contain (buffer size - 1). The end result might not be NULL-terminated as we are abusing the max string size. Let's use strlcpy() instead. Reported-by: Li Zefan Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt --- kernel/trace/ftrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index aaea9cda8781..b10c0d90a6ff 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2293,7 +2293,7 @@ __setup("ftrace_filter=", set_ftrace_filter); #ifdef CONFIG_FUNCTION_GRAPH_TRACER static int __init set_graph_function(char *str) { - strncpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); + strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); return 1; } __setup("ftrace_graph_filter=", set_graph_function); -- cgit v1.2.3 From 1beee96bae0daf7f491356777c3080cc436950f5 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 14 Oct 2009 20:50:32 +0200 Subject: ftrace: Rename set_bootup_ftrace into set_cmdline_ftrace set_cmdline_ftrace is a better match for what this function does: apply a tracer name from the kernel command line. Reported-by: Steven Rostedt Signed-off-by: Frederic Weisbecker Cc: Li Zefan --- kernel/trace/trace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 4311ec3062f0..026e715a0c7a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf); static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; static char *default_bootup_tracer; -static int __init set_bootup_ftrace(char *str) +static int __init set_cmdline_ftrace(char *str) { strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); default_bootup_tracer = bootup_tracer_buf; @@ -137,7 +137,7 @@ static int __init set_bootup_ftrace(char *str) ring_buffer_expanded = 1; return 1; } -__setup("ftrace=", set_bootup_ftrace); +__setup("ftrace=", set_cmdline_ftrace); static int __init set_ftrace_dump_on_oops(char *str) { -- cgit v1.2.3 From 924a79af2cdee26a034b9bdce8c9c76995b5c901 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:32 -0400 Subject: perf tools: Handle print concatenations in event format file The kmem_alloc ftrace event format had a string that was broken up into two tokens: "string 1" "string 2". This patch lets the parser handle the concatenation. 
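As an illustration (this is not the exact kmem_alloc format, only the shape of the problem), a format file may split its print fmt across adjacent string literals:

  print fmt: "call_site=%lx ptr=%p " "bytes_req=%zu", REC->call_site, REC->ptr, REC->bytes_req

The parser now glues the two literals back into a single format string before processing the arguments.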
Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194357.253818714@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index eef60df7a5bf..a05c7144aded 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1734,6 +1734,7 @@ static int event_read_print(struct event *event) if (read_expect_type(EVENT_DQUOTE, &token) < 0) goto fail; + concat: event->print_fmt.format = token; event->print_fmt.args = NULL; @@ -1743,6 +1744,21 @@ static int event_read_print(struct event *event) if (type == EVENT_NONE) return 0; + /* Handle concatination of print lines */ + if (type == EVENT_DQUOTE) { + char *cat; + + cat = malloc_or_die(strlen(event->print_fmt.format) + + strlen(token) + 1); + strcpy(cat, event->print_fmt.format); + strcat(cat, token); + free_token(token); + free_token(event->print_fmt.format); + event->print_fmt.format = NULL; + token = cat; + goto concat; + } + if (test_type_token(type, token, EVENT_DELIM, (char *)",")) goto fail; -- cgit v1.2.3 From 91ff2bc191827f0d3f5ad0a433ff7df7d2dd9aee Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:33 -0400 Subject: perf tools: Fix backslash processing on trace print formats The handling of backslashes was broken. It would stop parsing when encountering one. Also, '\n', '\t', '\r' and '\\' were not converted. Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194357.521974680@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index a05c7144aded..2b75ec2f57e8 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -522,7 +522,10 @@ static enum event_type __read_token(char **tok) last_ch = ch; ch = __read_char(); buf[i++] = ch; - } while (ch != quote_ch && last_ch != '\\'); + /* the '\' '\' will cancel itself */ + if (ch == '\\' && last_ch == '\\') + last_ch = 0; + } while (ch != quote_ch || last_ch == '\\'); /* remove the last quote */ i--; goto out; @@ -2325,7 +2328,27 @@ static void pretty_print(void *data, int size, struct event *event) for (; *ptr; ptr++) { ls = 0; - if (*ptr == '%') { + if (*ptr == '\\') { + ptr++; + switch (*ptr) { + case 'n': + printf("\n"); + break; + case 't': + printf("\t"); + break; + case 'r': + printf("\r"); + break; + case '\\': + printf("\\"); + break; + default: + printf("%c", *ptr); + break; + } + + } else if (*ptr == '%') { saveptr = ptr; show_func = 0; cont_process: -- cgit v1.2.3 From 298ebc3ef2a6c569b3eb51651f04e26aecbf8a1d Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:34 -0400 Subject: perf tools: Handle trace parsing of < and > The code to handle the '<' and '>' ops was all in place, but they were not in the switch statement to consider them as valid ops. 
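A made-up example of a print fmt argument that now parses (the event fields here are hypothetical):

  print fmt: "order=%d kind=%s", REC->order, REC->order > 0 ? "compound" : "single"

Previously the '>' in such an expression fell through to the "unknown op" die() in process_op().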
Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194357.807434040@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 2b75ec2f57e8..3e643f5da202 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1170,6 +1170,8 @@ process_op(struct event *event, struct print_arg *arg, char **tok) strcmp(token, "*") == 0 || strcmp(token, "^") == 0 || strcmp(token, "/") == 0 || + strcmp(token, "<") == 0 || + strcmp(token, ">") == 0 || strcmp(token, "==") == 0 || strcmp(token, "!=") == 0) { -- cgit v1.2.3 From 0959b8d65ce26131c2d5ccfa518a7b76529280fa Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:35 -0400 Subject: perf tools: Handle arrays in print fields for trace parsing The array used by the ftrace stack events (caller[x]) causes issues with the parser. This adds code to handle the case, but it also assumes that the array is of type long. Note, this is a special case used (currently) only by the ftrace user and kernel stack records. Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194358.124833639@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 62 +++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 3e643f5da202..7aeedb09ea7d 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1046,6 +1046,35 @@ out_free: return EVENT_ERROR; } +static enum event_type +process_array(struct event *event, struct print_arg *top, char **tok) +{ + struct print_arg *arg; + enum event_type type; + char *token = NULL; + + arg = malloc_or_die(sizeof(*arg)); + memset(arg, 0, sizeof(*arg)); + + *tok = NULL; + type = process_arg(event, arg, &token); + if (test_type_token(type, token, EVENT_OP, (char *)"]")) + goto out_free; + + top->op.right = arg; + + free_token(token); + type = read_token_item(&token); + *tok = token; + + return type; + +out_free: + free_token(*tok); + free_arg(arg); + return EVENT_ERROR; +} + static int get_op_prio(char *op) { if (!op[1]) { @@ -1192,6 +1221,18 @@ process_op(struct event *event, struct print_arg *arg, char **tok) arg->op.right = right; + } else if (strcmp(token, "[") == 0) { + + left = malloc_or_die(sizeof(*left)); + *left = *arg; + + arg->type = PRINT_OP; + arg->op.op = token; + arg->op.left = left; + + arg->op.prio = 0; + type = process_array(event, arg, tok); + } else { die("unknown op '%s'", token); /* the arg is now the left side */ @@ -1931,6 +1972,7 @@ static unsigned long long eval_num_arg(void *data, int size, { unsigned long long val = 0; unsigned long long left, right; + struct print_arg *larg; switch (arg->type) { case PRINT_NULL: @@ -1957,6 +1999,26 @@ static unsigned long long eval_num_arg(void *data, int size, return 0; break; case PRINT_OP: + if (strcmp(arg->op.op, "[") == 0) { + /* + * Arrays are special, since we don't want + * to read the arg as is. 
+ */ + if (arg->op.left->type != PRINT_FIELD) + goto default_op; /* oops, all bets off */ + larg = arg->op.left; + if (!larg->field.field) { + larg->field.field = + find_any_field(event, larg->field.name); + if (!larg->field.field) + die("field %s not found", larg->field.name); + } + right = eval_num_arg(data, size, event, arg->op.right); + val = read_size(data + larg->field.field->offset + + right * long_size, long_size); + break; + } + default_op: left = eval_num_arg(data, size, event, arg->op.left); right = eval_num_arg(data, size, event, arg->op.right); switch (arg->op.op[0]) { -- cgit v1.2.3 From b99af874829cba2b30d212bc6fd31b56275ee4d2 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:36 -0400 Subject: perf tools: Handle * as typecast in trace parsing The '*' is currently only treated as a multiplication, and it needs to be handled as a typecast pointer. This is the version used by trace-cmd. Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194358.409327875@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 50 ++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 7aeedb09ea7d..f73ee55b51e8 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1217,7 +1217,24 @@ process_op(struct event *event, struct print_arg *arg, char **tok) right = malloc_or_die(sizeof(*right)); - type = process_arg(event, right, tok); + type = read_token_item(&token); + *tok = token; + + /* could just be a type pointer */ + if ((strcmp(arg->op.op, "*") == 0) && + type == EVENT_DELIM && (strcmp(token, ")") == 0)) { + if (left->type != PRINT_ATOM) + die("bad pointer type"); + left->atom.atom = realloc(left->atom.atom, + sizeof(left->atom.atom) + 3); + strcat(left->atom.atom, " *"); + *arg = *left; + free(arg); + + return type; + } + + type = process_arg_token(event, right, tok, type); arg->op.right = right; @@ -1548,7 +1565,6 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) { struct print_arg *item_arg; enum event_type type; - int ptr_cast = 0; char *token; type = process_arg(event, arg, &token); @@ -1556,26 +1572,11 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) if (type == EVENT_ERROR) return EVENT_ERROR; - if (type == EVENT_OP) { - /* handle the ptr casts */ - if (!strcmp(token, "*")) { - /* - * FIXME: should we zapp whitespaces before ')' ? 
- * (may require a peek_token_item()) - */ - if (__peek_char() == ')') { - ptr_cast = 1; - free_token(token); - type = read_token_item(&token); - } - } - if (!ptr_cast) { - type = process_op(event, arg, &token); + if (type == EVENT_OP) + type = process_op(event, arg, &token); - if (type == EVENT_ERROR) - return EVENT_ERROR; - } - } + if (type == EVENT_ERROR) + return EVENT_ERROR; if (test_type_token(type, token, EVENT_DELIM, (char *)")")) { free_token(token); @@ -1601,13 +1602,6 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) item_arg = malloc_or_die(sizeof(*item_arg)); arg->type = PRINT_TYPE; - if (ptr_cast) { - char *old = arg->atom.atom; - - arg->atom.atom = malloc_or_die(strlen(old + 3)); - sprintf(arg->atom.atom, "%s *", old); - free(old); - } arg->typecast.type = arg->atom.atom; arg->typecast.item = item_arg; type = process_arg_token(event, item_arg, &token, type); -- cgit v1.2.3 From f1d1feecf07261d083859ecfef0d4399036f9683 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:37 -0400 Subject: perf tools: Handle newlines in trace parsing better New lines between args in the trace format can break the parsing. This should not be the case. Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194358.637991808@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index f73ee55b51e8..59e4e4db7438 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1716,12 +1716,18 @@ process_arg_token(struct event *event, struct print_arg *arg, static int event_read_print_args(struct event *event, struct print_arg **list) { - enum event_type type; + enum event_type type = EVENT_ERROR; struct print_arg *arg; char *token; int args = 0; do { + if (type == EVENT_NEWLINE) { + free_token(token); + type = read_token_item(&token); + continue; + } + arg = malloc_or_die(sizeof(*arg)); memset(arg, 0, sizeof(*arg)); -- cgit v1.2.3 From 13999e59343b042b0807be2df6ae5895d29782a0 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:38 -0400 Subject: perf tools: Handle the case with and without the "signed" trace field The trace format files now have a "signed" field. But we should still be able to handle the kernels that do not have this field. 
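For reference, the two flavours of field description the parser now has to accept look roughly like this (field name, offset and size values are illustrative):

  older kernels:
    field:unsigned long call_site;    offset:16;    size:8;
  newer kernels:
    field:unsigned long call_site;    offset:16;    size:8;    signed:0;

The header page describes its fields with the same syntax, which is why parse_header_field() gets the equivalent treatment in the second half of the patch.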
Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194358.888239553@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 81 ++++++++++++++++++++++++------------- 1 file changed, 52 insertions(+), 29 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 59e4e4db7438..0739b12675f0 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -924,23 +924,30 @@ static int event_read_fields(struct event *event, struct format_field **fields) if (read_expected(EVENT_OP, (char *)";") < 0) goto fail_expect; - if (read_expected(EVENT_ITEM, (char *)"signed") < 0) - goto fail_expect; + type = read_token(&token); + if (type != EVENT_NEWLINE) { + /* newer versions of the kernel have a "signed" type */ + if (test_type_token(type, token, EVENT_ITEM, (char *)"signed")) + goto fail; - if (read_expected(EVENT_OP, (char *)":") < 0) - goto fail_expect; + free_token(token); - if (read_expect_type(EVENT_ITEM, &token)) - goto fail; - if (strtoul(token, NULL, 0)) - field->flags |= FIELD_IS_SIGNED; - free_token(token); + if (read_expected(EVENT_OP, (char *)":") < 0) + goto fail_expect; - if (read_expected(EVENT_OP, (char *)";") < 0) - goto fail_expect; + if (read_expect_type(EVENT_ITEM, &token)) + goto fail; + + /* add signed type */ + + free_token(token); + if (read_expected(EVENT_OP, (char *)";") < 0) + goto fail_expect; + + if (read_expect_type(EVENT_NEWLINE, &token)) + goto fail; + } - if (read_expect_type(EVENT_NEWLINE, &token) < 0) - goto fail; free_token(token); *fields = field; @@ -2949,21 +2956,23 @@ static void print_args(struct print_arg *args) } } -static void parse_header_field(char *type, +static void parse_header_field(char *field, int *offset, int *size) { char *token; + int type; if (read_expected(EVENT_ITEM, (char *)"field") < 0) return; if (read_expected(EVENT_OP, (char *)":") < 0) return; + /* type */ if (read_expect_type(EVENT_ITEM, &token) < 0) - return; + goto fail; free_token(token); - if (read_expected(EVENT_ITEM, type) < 0) + if (read_expected(EVENT_ITEM, field) < 0) return; if (read_expected(EVENT_OP, (char *)";") < 0) return; @@ -2972,7 +2981,7 @@ static void parse_header_field(char *type, if (read_expected(EVENT_OP, (char *)":") < 0) return; if (read_expect_type(EVENT_ITEM, &token) < 0) - return; + goto fail; *offset = atoi(token); free_token(token); if (read_expected(EVENT_OP, (char *)";") < 0) @@ -2982,22 +2991,36 @@ static void parse_header_field(char *type, if (read_expected(EVENT_OP, (char *)":") < 0) return; if (read_expect_type(EVENT_ITEM, &token) < 0) - return; + goto fail; *size = atoi(token); free_token(token); if (read_expected(EVENT_OP, (char *)";") < 0) return; - if (read_expected(EVENT_ITEM, (char *)"signed") < 0) - return; - if (read_expected(EVENT_OP, (char *)":") < 0) - return; - if (read_expect_type(EVENT_ITEM, &token) < 0) - return; - free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) - return; - if (read_expect_type(EVENT_NEWLINE, &token) < 0) - return; + type = read_token(&token); + if (type != EVENT_NEWLINE) { + /* newer versions of the kernel have a "signed" type */ + if (type != EVENT_ITEM) + goto fail; + + if (strcmp(token, (char *)"signed") != 0) + goto fail; + + free_token(token); + + if (read_expected(EVENT_OP, (char *)":") < 0) + return; + + if (read_expect_type(EVENT_ITEM, &token)) + goto fail; + + free_token(token); + if (read_expected(EVENT_OP, (char 
*)";") < 0) + return; + + if (read_expect_type(EVENT_NEWLINE, &token)) + goto fail; + } + fail: free_token(token); } -- cgit v1.2.3 From 07a4bdddcf2546ccfbfb3c782deab636c371edeb Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:39 -0400 Subject: perf tools: Still continue on failed parsing of an event Even though an event may fail to parse, we should not kill the entire report. The trace should still be able to show what it can. If an event fails to parse, a warning is printed, and the output continues. Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194359.190809589@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 38 ++++++++++++++++++++++++++----------- tools/perf/util/trace-event.h | 14 ++++++++------ 2 files changed, 35 insertions(+), 17 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 0739b12675f0..eda0a2488c19 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -613,7 +613,7 @@ static enum event_type read_token_item(char **tok) static int test_type(enum event_type type, enum event_type expect) { if (type != expect) { - die("Error: expected type %d but read %d", + warning("Error: expected type %d but read %d", expect, type); return -1; } @@ -624,13 +624,13 @@ static int test_type_token(enum event_type type, char *token, enum event_type expect, const char *expect_tok) { if (type != expect) { - die("Error: expected type %d but read %d", + warning("Error: expected type %d but read %d", expect, type); return -1; } if (strcmp(token, expect_tok) != 0) { - die("Error: expected '%s' but read '%s'", + warning("Error: expected '%s' but read '%s'", expect_tok, token); return -1; } @@ -668,7 +668,7 @@ static int __read_expected(enum event_type expect, const char *str, int newline_ free_token(token); - return 0; + return ret; } static int read_expected(enum event_type expect, const char *str) @@ -1258,12 +1258,12 @@ process_op(struct event *event, struct print_arg *arg, char **tok) type = process_array(event, arg, tok); } else { - die("unknown op '%s'", token); + warning("unknown op '%s'", token); + event->flags |= EVENT_FL_FAILED; /* the arg is now the left side */ return EVENT_NONE; } - if (type == EVENT_OP) { int prio; @@ -2873,7 +2873,7 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs, event = trace_find_event(type); if (!event) { - printf("ug! no event found for type %d\n", type); + warning("ug! 
no event found for type %d", type); return; } @@ -2887,6 +2887,12 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs, comm, pid, cpu, secs, nsecs, event->name); + if (event->flags & EVENT_FL_FAILED) { + printf("EVENT '%s' FAILED TO PARSE\n", + event->name); + return; + } + pretty_print(data, size, event); printf("\n"); } @@ -3120,12 +3126,16 @@ int parse_event_file(char *buf, unsigned long size, char *sys) die("failed to read event id"); ret = event_read_format(event); - if (ret < 0) - die("failed to read event format"); + if (ret < 0) { + warning("failed to read event format for %s", event->name); + goto event_failed; + } ret = event_read_print(event); - if (ret < 0) - die("failed to read event print fmt"); + if (ret < 0) { + warning("failed to read event print fmt for %s", event->name); + goto event_failed; + } event->system = strdup(sys); @@ -3135,6 +3145,12 @@ int parse_event_file(char *buf, unsigned long size, char *sys) add_event(event); return 0; + + event_failed: + event->flags |= EVENT_FL_FAILED; + /* still add it even if it failed */ + add_event(event); + return -1; } void parse_set_info(int nr_cpus, int long_sz) diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index da77e073c867..29821acc8db6 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -139,12 +139,14 @@ struct event { }; enum { - EVENT_FL_ISFTRACE = 1, - EVENT_FL_ISPRINT = 2, - EVENT_FL_ISBPRINT = 4, - EVENT_FL_ISFUNC = 8, - EVENT_FL_ISFUNCENT = 16, - EVENT_FL_ISFUNCRET = 32, + EVENT_FL_ISFTRACE = 0x01, + EVENT_FL_ISPRINT = 0x02, + EVENT_FL_ISBPRINT = 0x04, + EVENT_FL_ISFUNC = 0x08, + EVENT_FL_ISFUNCENT = 0x10, + EVENT_FL_ISFUNCRET = 0x20, + + EVENT_FL_FAILED = 0x80000000 }; struct record { -- cgit v1.2.3 From ffa1895561645103d8f8059b35d9c06e6eeead2e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:40 -0400 Subject: perf tools: Fix bprintk reading in trace output The bprintk parsing was broken in more ways than one. The file parsing was incorrect, and the words used by the arguments are always 4 bytes aligned, even on 64-bit machines. 
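For illustration, each line of the saved printk format table has the shape below (address and format string are made up); splitting on the ':' and skipping the following space is all the parsing that is needed, rather than the old sscanf() call:

  0xffffffff81234567 : "reading %d bytes from %s\n"

The alignment part of the fix reflects that the bprintk argument words are packed on 4-byte boundaries regardless of the host word size, hence the rounding with (bptr + 3) & ~3 in the hunk below.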
Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194359.520931637@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index eda0a2488c19..93a82fead958 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -284,18 +284,16 @@ void parse_ftrace_printk(char *file, unsigned int size __unused) char *line; char *next = NULL; char *addr_str; - int ret; + char *fmt; int i; line = strtok_r(file, "\n", &next); while (line) { item = malloc_or_die(sizeof(*item)); - ret = sscanf(line, "%as : %as", - (float *)(void *)&addr_str, /* workaround gcc warning */ - (float *)(void *)&item->printk); + addr_str = strtok_r(line, ":", &fmt); item->addr = strtoull(addr_str, NULL, 16); - free(addr_str); - + /* fmt still has a space, skip it */ + item->printk = strdup(fmt+1); item->next = list; list = item; line = strtok_r(NULL, "\n", &next); @@ -2274,8 +2272,9 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc case 'u': case 'x': case 'i': - bptr = (void *)(((unsigned long)bptr + (long_size - 1)) & - ~(long_size - 1)); + /* the pointers are always 4 bytes aligned */ + bptr = (void *)(((unsigned long)bptr + 3) & + ~3); switch (ls) { case 0: case 1: -- cgit v1.2.3 From 0d1da915c76838c9ee7af7cdefbcb2bae9424161 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:41 -0400 Subject: perf tools: Handle both versions of ftrace output The ftrace output events can have either arguments or no arguments. The parser needs to be able to handle both. Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194359.790221427@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 93a82fead958..c174765d4056 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1819,7 +1819,7 @@ static int event_read_print(struct event *event) if (ret < 0) return -1; - return 0; + return ret; fail: free_token(token); @@ -3088,6 +3088,9 @@ int parse_ftrace_file(char *buf, unsigned long size) if (ret < 0) die("failed to read ftrace event print fmt"); + /* New ftrace handles args */ + if (ret > 0) + return 0; /* * The arguments for ftrace files are parsed by the fields. * Set up the fields as their arguments. -- cgit v1.2.3 From cda48461c7fb8431a99b7960480f5f42cc1a5324 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:42 -0400 Subject: perf tools: Add latency format to trace output Add the irqs disabled, preemption count, need resched, and other info that is shown in the latency format of ftrace. # perf trace -l perf-16457 2..s2. 53636.260344: kmem_cache_free: call_site=ffffffff811198f perf-16457 2..s2. 53636.264330: kmem_cache_free: call_site=ffffffff811198f perf-16457 2d.s4. 
53636.300006: kmem_cache_free: call_site=ffffffff810d889 Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194400.076588953@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-trace.c | 2 + tools/perf/util/trace-event-parse.c | 120 ++++++++++++++++++++++++++++++------ tools/perf/util/trace-event.h | 11 ++++ 3 files changed, 114 insertions(+), 19 deletions(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index ccf867dbab5c..ce8459ac2845 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -144,6 +144,8 @@ static const struct option options[] = { "dump raw trace in ASCII"), OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('l', "latency", &latency_format, + "show latency attributes (irqs/preemption disabled, etc)"), OPT_END() }; diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index c174765d4056..fde1a434d630 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -40,6 +40,8 @@ int header_page_size_size; int header_page_data_offset; int header_page_data_size; +int latency_format; + static char *input_buf; static unsigned long long input_buf_ptr; static unsigned long long input_buf_siz; @@ -1928,37 +1930,67 @@ static int get_common_info(const char *type, int *offset, int *size) return 0; } -int trace_parse_common_type(void *data) +static int __parse_common(void *data, int *size, int *offset, + char *name) { - static int type_offset; - static int type_size; int ret; - if (!type_size) { - ret = get_common_info("common_type", - &type_offset, - &type_size); + if (!*size) { + ret = get_common_info(name, offset, size); if (ret < 0) return ret; } - return read_size(data + type_offset, type_size); + return read_size(data + *offset, *size); +} + +int trace_parse_common_type(void *data) +{ + static int type_offset; + static int type_size; + + return __parse_common(data, &type_size, &type_offset, + (char *)"common_type"); } static int parse_common_pid(void *data) { static int pid_offset; static int pid_size; + + return __parse_common(data, &pid_size, &pid_offset, + (char *)"common_pid"); +} + +static int parse_common_pc(void *data) +{ + static int pc_offset; + static int pc_size; + + return __parse_common(data, &pc_size, &pc_offset, + (char *)"common_preempt_count"); +} + +static int parse_common_flags(void *data) +{ + static int flags_offset; + static int flags_size; + + return __parse_common(data, &flags_size, &flags_offset, + (char *)"common_flags"); +} + +static int parse_common_lock_depth(void *data) +{ + static int ld_offset; + static int ld_size; int ret; - if (!pid_size) { - ret = get_common_info("common_pid", - &pid_offset, - &pid_size); - if (ret < 0) - return ret; - } + ret = __parse_common(data, &ld_size, &ld_offset, + (char *)"common_lock_depth"); + if (ret < 0) + return -1; - return read_size(data + pid_offset, pid_size); + return ret; } struct event *trace_find_event(int id) @@ -2525,6 +2557,41 @@ static inline int log10_cpu(int nb) return 1; } +static void print_lat_fmt(void *data, int size __unused) +{ + unsigned int lat_flags; + unsigned int pc; + int lock_depth; + int hardirq; + int softirq; + + lat_flags = parse_common_flags(data); + pc = parse_common_pc(data); + lock_depth = parse_common_lock_depth(data); + + hardirq = lat_flags & TRACE_FLAG_HARDIRQ; + softirq = lat_flags & TRACE_FLAG_SOFTIRQ; + + printf("%c%c%c", + (lat_flags 
& TRACE_FLAG_IRQS_OFF) ? 'd' : + (lat_flags & TRACE_FLAG_IRQS_NOSUPPORT) ? + 'X' : '.', + (lat_flags & TRACE_FLAG_NEED_RESCHED) ? + 'N' : '.', + (hardirq && softirq) ? 'H' : + hardirq ? 'h' : softirq ? 's' : '.'); + + if (pc) + printf("%x", pc); + else + printf("."); + + if (lock_depth < 0) + printf("."); + else + printf("%d", lock_depth); +} + /* taken from Linux, written by Frederic Weisbecker */ static void print_graph_cpu(int cpu) { @@ -2768,6 +2835,11 @@ pretty_print_func_ent(void *data, int size, struct event *event, printf(" | "); + if (latency_format) { + print_lat_fmt(data, size); + printf(" | "); + } + field = find_field(event, "func"); if (!field) die("function entry does not have func field"); @@ -2811,6 +2883,11 @@ pretty_print_func_ret(void *data, int size __unused, struct event *event, printf(" | "); + if (latency_format) { + print_lat_fmt(data, size); + printf(" | "); + } + field = find_field(event, "rettime"); if (!field) die("can't find rettime in return graph"); @@ -2882,9 +2959,14 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs, return pretty_print_func_graph(data, size, event, cpu, pid, comm, secs, usecs); - printf("%16s-%-5d [%03d] %5lu.%09Lu: %s: ", - comm, pid, cpu, - secs, nsecs, event->name); + if (latency_format) { + printf("%8.8s-%-5d %3d", + comm, pid, cpu); + print_lat_fmt(data, size); + } else + printf("%16s-%-5d [%03d]", comm, pid, cpu); + + printf(" %5lu.%06lu: %s: ", secs, usecs, event->name); if (event->flags & EVENT_FL_FAILED) { printf("EVENT '%s' FAILED TO PARSE\n", diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 29821acc8db6..f6637c2fa1fe 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -239,6 +239,8 @@ extern int header_page_size_size; extern int header_page_data_offset; extern int header_page_data_size; +extern int latency_format; + int parse_header_page(char *buf, unsigned long size); int trace_parse_common_type(void *data); struct event *trace_find_event(int id); @@ -248,4 +250,13 @@ void *raw_field_ptr(struct event *event, const char *name, void *data); void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); +/* taken from kernel/trace/trace.h */ +enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 0x01, + TRACE_FLAG_IRQS_NOSUPPORT = 0x02, + TRACE_FLAG_NEED_RESCHED = 0x04, + TRACE_FLAG_HARDIRQ = 0x08, + TRACE_FLAG_SOFTIRQ = 0x10, +}; + #endif /* __PERF_TRACE_EVENTS_H */ -- cgit v1.2.3 From afdf1a404eed236d6f762ee44cc0f1dcc97206e0 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:43 -0400 Subject: perf tools: Handle - and + in parsing trace print format The opterators '-' and '+' are not handled in the trace print format. To do: '++' and '--'. 
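For illustration, arithmetic such as the following in a (made-up) print format now evaluates instead of hitting the unknown-op die() in eval_num_arg():

  print fmt: "len=%lu", REC->end - REC->start + 1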
Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194400.330843045@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index fde1a434d630..2d424ff50ec2 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -2106,6 +2106,12 @@ static unsigned long long eval_num_arg(void *data, int size, die("unknown op '%s'", arg->op.op); val = left == right; break; + case '-': + val = left - right; + break; + case '+': + val = left + right; + break; default: die("unknown op '%s'", arg->op.op); } -- cgit v1.2.3 From c4dc775f53136cd6af8f88bce67cce9b42751768 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:44 -0400 Subject: perf tools: Remove all char * typecasts and use const in prototype The (char *) for all the static strings was a fix for the symptom and not the disease. The real issue was that the function prototypes needed to be declared "const char *". Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194400.635935008@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 122 ++++++++++++++++++------------------ 1 file changed, 61 insertions(+), 61 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 2d424ff50ec2..4b61b497040e 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -685,10 +685,10 @@ static char *event_read_name(void) { char *token; - if (read_expected(EVENT_ITEM, (char *)"name") < 0) + if (read_expected(EVENT_ITEM, "name") < 0) return NULL; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return NULL; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -706,10 +706,10 @@ static int event_read_id(void) char *token; int id; - if (read_expected_item(EVENT_ITEM, (char *)"ID") < 0) + if (read_expected_item(EVENT_ITEM, "ID") < 0) return -1; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return -1; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -759,7 +759,7 @@ static int event_read_fields(struct event *event, struct format_field **fields) count++; - if (test_type_token(type, token, EVENT_ITEM, (char *)"field")) + if (test_type_token(type, token, EVENT_ITEM, "field")) goto fail; free_token(token); @@ -774,7 +774,7 @@ static int event_read_fields(struct event *event, struct format_field **fields) type = read_token(&token); } - if (test_type_token(type, token, EVENT_OP, (char *)":") < 0) + if (test_type_token(type, token, EVENT_OP, ":") < 0) return -1; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -892,14 +892,14 @@ static int event_read_fields(struct event *event, struct format_field **fields) field->flags |= FIELD_IS_DYNAMIC; } - if (test_type_token(type, token, EVENT_OP, (char *)";")) + if (test_type_token(type, token, EVENT_OP, ";")) goto fail; free_token(token); - if (read_expected(EVENT_ITEM, (char *)"offset") < 0) + if (read_expected(EVENT_ITEM, "offset") < 0) goto fail_expect; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) goto fail_expect; if (read_expect_type(EVENT_ITEM, &token)) @@ -907,13 +907,13 @@ static int event_read_fields(struct event *event, struct format_field 
**fields) field->offset = strtoul(token, NULL, 0); free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) goto fail_expect; - if (read_expected(EVENT_ITEM, (char *)"size") < 0) + if (read_expected(EVENT_ITEM, "size") < 0) goto fail_expect; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) goto fail_expect; if (read_expect_type(EVENT_ITEM, &token)) @@ -921,18 +921,18 @@ static int event_read_fields(struct event *event, struct format_field **fields) field->size = strtoul(token, NULL, 0); free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) goto fail_expect; type = read_token(&token); if (type != EVENT_NEWLINE) { /* newer versions of the kernel have a "signed" type */ - if (test_type_token(type, token, EVENT_ITEM, (char *)"signed")) + if (test_type_token(type, token, EVENT_ITEM, "signed")) goto fail; free_token(token); - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) goto fail_expect; if (read_expect_type(EVENT_ITEM, &token)) @@ -941,7 +941,7 @@ static int event_read_fields(struct event *event, struct format_field **fields) /* add signed type */ free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) goto fail_expect; if (read_expect_type(EVENT_NEWLINE, &token)) @@ -970,10 +970,10 @@ static int event_read_format(struct event *event) char *token; int ret; - if (read_expected_item(EVENT_ITEM, (char *)"format") < 0) + if (read_expected_item(EVENT_ITEM, "format") < 0) return -1; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return -1; if (read_expect_type(EVENT_NEWLINE, &token)) @@ -1033,7 +1033,7 @@ process_cond(struct event *event, struct print_arg *top, char **tok) *tok = NULL; type = process_arg(event, left, &token); - if (test_type_token(type, token, EVENT_OP, (char *)":")) + if (test_type_token(type, token, EVENT_OP, ":")) goto out_free; arg->op.op = token; @@ -1065,7 +1065,7 @@ process_array(struct event *event, struct print_arg *top, char **tok) *tok = NULL; type = process_arg(event, arg, &token); - if (test_type_token(type, token, EVENT_OP, (char *)"]")) + if (test_type_token(type, token, EVENT_OP, "]")) goto out_free; top->op.right = arg; @@ -1287,7 +1287,7 @@ process_entry(struct event *event __unused, struct print_arg *arg, char *field; char *token; - if (read_expected(EVENT_OP, (char *)"->") < 0) + if (read_expected(EVENT_OP, "->") < 0) return EVENT_ERROR; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -1447,14 +1447,14 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok) do { free_token(token); type = read_token_item(&token); - if (test_type_token(type, token, EVENT_OP, (char *)"{")) + if (test_type_token(type, token, EVENT_OP, "{")) break; arg = malloc_or_die(sizeof(*arg)); free_token(token); type = process_arg(event, arg, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; field = malloc_or_die(sizeof(*field)); @@ -1465,7 +1465,7 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok) free_token(token); type = process_arg(event, arg, &token); - if (test_type_token(type, token, EVENT_OP, (char *)"}")) + if (test_type_token(type, token, EVENT_OP, "}")) goto out_free; value = arg_eval(arg); @@ -1500,13 +1500,13 @@ process_flags(struct event *event, struct print_arg *arg, 
char **tok) memset(arg, 0, sizeof(*arg)); arg->type = PRINT_FLAGS; - if (read_expected_item(EVENT_DELIM, (char *)"(") < 0) + if (read_expected_item(EVENT_DELIM, "(") < 0) return EVENT_ERROR; field = malloc_or_die(sizeof(*field)); type = process_arg(event, field, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; arg->flags.field = field; @@ -1517,11 +1517,11 @@ process_flags(struct event *event, struct print_arg *arg, char **tok) type = read_token_item(&token); } - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; type = process_fields(event, &arg->flags.flags, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)")")) + if (test_type_token(type, token, EVENT_DELIM, ")")) goto out_free; free_token(token); @@ -1543,19 +1543,19 @@ process_symbols(struct event *event, struct print_arg *arg, char **tok) memset(arg, 0, sizeof(*arg)); arg->type = PRINT_SYMBOL; - if (read_expected_item(EVENT_DELIM, (char *)"(") < 0) + if (read_expected_item(EVENT_DELIM, "(") < 0) return EVENT_ERROR; field = malloc_or_die(sizeof(*field)); type = process_arg(event, field, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; arg->symbol.field = field; type = process_fields(event, &arg->symbol.symbols, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)")")) + if (test_type_token(type, token, EVENT_DELIM, ")")) goto out_free; free_token(token); @@ -1585,7 +1585,7 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) if (type == EVENT_ERROR) return EVENT_ERROR; - if (test_type_token(type, token, EVENT_DELIM, (char *)")")) { + if (test_type_token(type, token, EVENT_DELIM, ")")) { free_token(token); return EVENT_ERROR; } @@ -1626,7 +1626,7 @@ process_str(struct event *event __unused, struct print_arg *arg, char **tok) enum event_type type; char *token; - if (read_expected(EVENT_DELIM, (char *)"(") < 0) + if (read_expected(EVENT_DELIM, "(") < 0) return EVENT_ERROR; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -1636,7 +1636,7 @@ process_str(struct event *event __unused, struct print_arg *arg, char **tok) arg->string.string = token; arg->string.offset = -1; - if (read_expected(EVENT_DELIM, (char *)")") < 0) + if (read_expected(EVENT_DELIM, ")") < 0) return EVENT_ERROR; type = read_token(&token); @@ -1775,13 +1775,13 @@ static int event_read_print(struct event *event) char *token; int ret; - if (read_expected_item(EVENT_ITEM, (char *)"print") < 0) + if (read_expected_item(EVENT_ITEM, "print") < 0) return -1; - if (read_expected(EVENT_ITEM, (char *)"fmt") < 0) + if (read_expected(EVENT_ITEM, "fmt") < 0) return -1; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return -1; if (read_expect_type(EVENT_DQUOTE, &token) < 0) @@ -1811,8 +1811,8 @@ static int event_read_print(struct event *event) token = cat; goto concat; } - - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + + if (test_type_token(type, token, EVENT_DELIM, ",")) goto fail; free_token(token); @@ -1931,7 +1931,7 @@ static int get_common_info(const char *type, int *offset, int *size) } static int __parse_common(void *data, int *size, int *offset, - char *name) + const char *name) { int ret; @@ -1949,7 +1949,7 @@ int trace_parse_common_type(void *data) static int type_size; return __parse_common(data, &type_size, 
&type_offset, - (char *)"common_type"); + "common_type"); } static int parse_common_pid(void *data) @@ -1958,7 +1958,7 @@ static int parse_common_pid(void *data) static int pid_size; return __parse_common(data, &pid_size, &pid_offset, - (char *)"common_pid"); + "common_pid"); } static int parse_common_pc(void *data) @@ -1967,7 +1967,7 @@ static int parse_common_pc(void *data) static int pc_size; return __parse_common(data, &pc_size, &pc_offset, - (char *)"common_preempt_count"); + "common_preempt_count"); } static int parse_common_flags(void *data) @@ -1976,7 +1976,7 @@ static int parse_common_flags(void *data) static int flags_size; return __parse_common(data, &flags_size, &flags_offset, - (char *)"common_flags"); + "common_flags"); } static int parse_common_lock_depth(void *data) @@ -1986,7 +1986,7 @@ static int parse_common_lock_depth(void *data) int ret; ret = __parse_common(data, &ld_size, &ld_offset, - (char *)"common_lock_depth"); + "common_lock_depth"); if (ret < 0) return -1; @@ -3049,15 +3049,15 @@ static void print_args(struct print_arg *args) } } -static void parse_header_field(char *field, +static void parse_header_field(const char *field, int *offset, int *size) { char *token; int type; - if (read_expected(EVENT_ITEM, (char *)"field") < 0) + if (read_expected(EVENT_ITEM, "field") < 0) return; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return; /* type */ @@ -3067,27 +3067,27 @@ static void parse_header_field(char *field, if (read_expected(EVENT_ITEM, field) < 0) return; - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) return; - if (read_expected(EVENT_ITEM, (char *)"offset") < 0) + if (read_expected(EVENT_ITEM, "offset") < 0) return; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return; if (read_expect_type(EVENT_ITEM, &token) < 0) goto fail; *offset = atoi(token); free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) return; - if (read_expected(EVENT_ITEM, (char *)"size") < 0) + if (read_expected(EVENT_ITEM, "size") < 0) return; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return; if (read_expect_type(EVENT_ITEM, &token) < 0) goto fail; *size = atoi(token); free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) return; type = read_token(&token); if (type != EVENT_NEWLINE) { @@ -3095,19 +3095,19 @@ static void parse_header_field(char *field, if (type != EVENT_ITEM) goto fail; - if (strcmp(token, (char *)"signed") != 0) + if (strcmp(token, "signed") != 0) goto fail; free_token(token); - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return; if (read_expect_type(EVENT_ITEM, &token)) goto fail; free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) return; if (read_expect_type(EVENT_NEWLINE, &token)) @@ -3121,11 +3121,11 @@ int parse_header_page(char *buf, unsigned long size) { init_input_buf(buf, size); - parse_header_field((char *)"timestamp", &header_page_ts_offset, + parse_header_field("timestamp", &header_page_ts_offset, &header_page_ts_size); - parse_header_field((char *)"commit", &header_page_size_offset, + parse_header_field("commit", &header_page_size_offset, &header_page_size_size); - parse_header_field((char *)"data", &header_page_data_offset, + parse_header_field("data", &header_page_data_offset, 
&header_page_data_size); return 0; -- cgit v1.2.3 From fce29d15b59245597f7f320db4a9f2be0f5fb512 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 15 Oct 2009 11:20:34 +0800 Subject: tracing/filters: Refactor subsystem filter code Change: for_each_pred for_each_subsystem To: for_each_subsystem for_each_pred This change also prepares for later patches. Signed-off-by: Li Zefan Acked-by: Peter Zijlstra Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <4AD69502.8060903@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.h | 1 - kernel/trace/trace_events_filter.c | 124 ++++++++++++++----------------------- 2 files changed, 45 insertions(+), 80 deletions(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index acef8b4636f0..628614532d16 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -683,7 +683,6 @@ struct event_filter { int n_preds; struct filter_pred **preds; char *filter_string; - bool no_reset; }; struct event_subsystem { diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 92672016da28..9c4f9a0dae2b 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -615,14 +615,7 @@ static int init_subsystem_preds(struct event_subsystem *system) return 0; } -enum { - FILTER_DISABLE_ALL, - FILTER_INIT_NO_RESET, - FILTER_SKIP_NO_RESET, -}; - -static void filter_free_subsystem_preds(struct event_subsystem *system, - int flag) +static void filter_free_subsystem_preds(struct event_subsystem *system) { struct ftrace_event_call *call; @@ -633,14 +626,6 @@ static void filter_free_subsystem_preds(struct event_subsystem *system, if (strcmp(call->system, system->name) != 0) continue; - if (flag == FILTER_INIT_NO_RESET) { - call->filter->no_reset = false; - continue; - } - - if (flag == FILTER_SKIP_NO_RESET && call->filter->no_reset) - continue; - filter_disable_preds(call); remove_filter_string(call->filter); } @@ -817,44 +802,6 @@ add_pred_fn: return 0; } -static int filter_add_subsystem_pred(struct filter_parse_state *ps, - struct event_subsystem *system, - struct filter_pred *pred, - char *filter_string, - bool dry_run) -{ - struct ftrace_event_call *call; - int err = 0; - bool fail = true; - - list_for_each_entry(call, &ftrace_events, list) { - - if (!call->define_fields) - continue; - - if (strcmp(call->system, system->name)) - continue; - - if (call->filter->no_reset) - continue; - - err = filter_add_pred(ps, call, pred, dry_run); - if (err) - call->filter->no_reset = true; - else - fail = false; - - if (!dry_run) - replace_filter_string(call->filter, filter_string); - } - - if (fail) { - parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); - return err; - } - return 0; -} - static void parse_init(struct filter_parse_state *ps, struct filter_op *ops, char *infix_string) @@ -1209,8 +1156,7 @@ static int check_preds(struct filter_parse_state *ps) return 0; } -static int replace_preds(struct event_subsystem *system, - struct ftrace_event_call *call, +static int replace_preds(struct ftrace_event_call *call, struct filter_parse_state *ps, char *filter_string, bool dry_run) @@ -1257,11 +1203,7 @@ static int replace_preds(struct event_subsystem *system, add_pred: if (!pred) return -ENOMEM; - if (call) - err = filter_add_pred(ps, call, pred, false); - else - err = filter_add_subsystem_pred(ps, system, pred, - filter_string, dry_run); + err = filter_add_pred(ps, call, pred, dry_run); filter_free_pred(pred); if (err) return err; @@ -1272,6 +1214,44 @@ add_pred: return 0; } 
+static int replace_system_preds(struct event_subsystem *system, + struct filter_parse_state *ps, + char *filter_string) +{ + struct ftrace_event_call *call; + int err; + bool fail = true; + + list_for_each_entry(call, &ftrace_events, list) { + + if (!call->define_fields) + continue; + + if (strcmp(call->system, system->name) != 0) + continue; + + /* try to see if the filter can be applied */ + err = replace_preds(call, ps, filter_string, true); + if (err) + continue; + + /* really apply the filter */ + filter_disable_preds(call); + err = replace_preds(call, ps, filter_string, false); + if (err) + filter_disable_preds(call); + else + replace_filter_string(call->filter, filter_string); + fail = false; + } + + if (fail) { + parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); + return err; + } + return 0; +} + int apply_event_filter(struct ftrace_event_call *call, char *filter_string) { int err; @@ -1306,7 +1286,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) goto out; } - err = replace_preds(NULL, call, ps, filter_string, false); + err = replace_preds(call, ps, filter_string, false); if (err) append_filter_err(ps, call->filter); @@ -1334,7 +1314,7 @@ int apply_subsystem_event_filter(struct event_subsystem *system, goto out_unlock; if (!strcmp(strstrip(filter_string), "0")) { - filter_free_subsystem_preds(system, FILTER_DISABLE_ALL); + filter_free_subsystem_preds(system); remove_filter_string(system->filter); mutex_unlock(&event_mutex); return 0; @@ -1354,23 +1334,9 @@ int apply_subsystem_event_filter(struct event_subsystem *system, goto out; } - filter_free_subsystem_preds(system, FILTER_INIT_NO_RESET); - - /* try to see the filter can be applied to which events */ - err = replace_preds(system, NULL, ps, filter_string, true); - if (err) { - append_filter_err(ps, system->filter); - goto out; - } - - filter_free_subsystem_preds(system, FILTER_SKIP_NO_RESET); - - /* really apply the filter to the events */ - err = replace_preds(system, NULL, ps, filter_string, false); - if (err) { + err = replace_system_preds(system, ps, filter_string); + if (err) append_filter_err(ps, system->filter); - filter_free_subsystem_preds(system, 2); - } out: filter_opstack_clear(ps); -- cgit v1.2.3 From b0f1a59a98d7ac2102e7e4f22904c26d564a5628 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 15 Oct 2009 11:21:12 +0800 Subject: tracing/filters: Use a different op for glob match "==" will always do a full match, and "~" will do a glob match. In the future, we may add "=~" for regex match. 
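In filter terms the distinction looks like this (sched_switch's next_comm is just a convenient string field to demonstrate with; the debugfs mount point is assumed):

  cd /debug/tracing/events/sched/sched_switch
  echo 'next_comm == "perf"' > filter    # full match: only the exact string "perf"
  echo 'next_comm ~ "perf*"' > filter    # glob match: "perf", "perf-1234", ...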
Signed-off-by: Li Zefan Acked-by: Peter Zijlstra Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <4AD69528.3050309@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.h | 2 +- kernel/trace/trace_events_filter.c | 59 ++++++++++++++++++-------------------- 2 files changed, 29 insertions(+), 32 deletions(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 628614532d16..ffe53ddbe67a 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -702,7 +702,7 @@ typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, typedef int (*regex_match_func)(char *str, struct regex *r, int len); enum regex_type { - MATCH_FULL, + MATCH_FULL = 0, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY, diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 9c4f9a0dae2b..273845fce393 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -29,6 +29,7 @@ enum filter_op_ids { OP_OR, OP_AND, + OP_GLOB, OP_NE, OP_EQ, OP_LT, @@ -46,16 +47,17 @@ struct filter_op { }; static struct filter_op filter_ops[] = { - { OP_OR, "||", 1 }, - { OP_AND, "&&", 2 }, - { OP_NE, "!=", 4 }, - { OP_EQ, "==", 4 }, - { OP_LT, "<", 5 }, - { OP_LE, "<=", 5 }, - { OP_GT, ">", 5 }, - { OP_GE, ">=", 5 }, - { OP_NONE, "OP_NONE", 0 }, - { OP_OPEN_PAREN, "(", 0 }, + { OP_OR, "||", 1 }, + { OP_AND, "&&", 2 }, + { OP_GLOB, "~", 4 }, + { OP_NE, "!=", 4 }, + { OP_EQ, "==", 4 }, + { OP_LT, "<", 5 }, + { OP_LE, "<=", 5 }, + { OP_GT, ">", 5 }, + { OP_GE, ">=", 5 }, + { OP_NONE, "OP_NONE", 0 }, + { OP_OPEN_PAREN, "(", 0 }, }; enum { @@ -329,22 +331,18 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not) return type; } -static int filter_build_regex(struct filter_pred *pred) +static void filter_build_regex(struct filter_pred *pred) { struct regex *r = &pred->regex; - char *search, *dup; - enum regex_type type; - int not; - - type = filter_parse_regex(r->pattern, r->len, &search, ¬); - dup = kstrdup(search, GFP_KERNEL); - if (!dup) - return -ENOMEM; - - strcpy(r->pattern, dup); - kfree(dup); - - r->len = strlen(r->pattern); + char *search; + enum regex_type type = MATCH_FULL; + int not = 0; + + if (pred->op == OP_GLOB) { + type = filter_parse_regex(r->pattern, r->len, &search, ¬); + r->len = strlen(search); + memmove(r->pattern, search, r->len+1); + } switch (type) { case MATCH_FULL: @@ -362,8 +360,6 @@ static int filter_build_regex(struct filter_pred *pred) } pred->not ^= not; - - return 0; } /* return 1 if event matches, 0 otherwise (discard) */ @@ -676,7 +672,10 @@ static bool is_string_field(struct ftrace_event_field *field) static int is_legal_op(struct ftrace_event_field *field, int op) { - if (is_string_field(field) && (op != OP_EQ && op != OP_NE)) + if (is_string_field(field) && + (op != OP_EQ && op != OP_NE && op != OP_GLOB)) + return 0; + if (!is_string_field(field) && op == OP_GLOB) return 0; return 1; @@ -761,15 +760,13 @@ static int filter_add_pred(struct filter_parse_state *ps, } if (is_string_field(field)) { - ret = filter_build_regex(pred); - if (ret) - return ret; + filter_build_regex(pred); if (field->filter_type == FILTER_STATIC_STRING) { fn = filter_pred_string; pred->regex.field_len = field->size; } else if (field->filter_type == FILTER_DYN_STRING) - fn = filter_pred_strloc; + fn = filter_pred_strloc; else { fn = filter_pred_pchar; pred->regex.field_len = strlen(pred->regex.pattern); -- cgit v1.2.3 From 6fb2915df7f0747d9044da9dbff5b46dc2e20830 Mon Sep 17 
00:00:00 2001 From: Li Zefan Date: Thu, 15 Oct 2009 11:21:42 +0800 Subject: tracing/profile: Add filter support - Add an ioctl to allocate a filter for a perf event. - Free the filter when the associated perf event is to be freed. - Do the filtering in perf_swevent_match(). Signed-off-by: Li Zefan Acked-by: Peter Zijlstra Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <4AD69546.8050401@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/linux/ftrace_event.h | 11 ++- include/linux/perf_counter.h | 1 + include/linux/perf_event.h | 6 ++ kernel/perf_event.c | 80 ++++++++++++++++++++-- kernel/trace/trace.h | 3 +- kernel/trace/trace_events_filter.c | 133 +++++++++++++++++++++++++++++-------- 6 files changed, 199 insertions(+), 35 deletions(-) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 4ec5e67e18cf..d11770472bc8 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -144,7 +144,7 @@ extern char *trace_profile_buf_nmi; #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ extern void destroy_preds(struct ftrace_event_call *call); -extern int filter_match_preds(struct ftrace_event_call *call, void *rec); +extern int filter_match_preds(struct event_filter *filter, void *rec); extern int filter_current_check_discard(struct ring_buffer *buffer, struct ftrace_event_call *call, void *rec, @@ -186,4 +186,13 @@ do { \ __trace_printk(ip, fmt, ##args); \ } while (0) +#ifdef CONFIG_EVENT_PROFILE +struct perf_event; +extern int ftrace_profile_enable(int event_id); +extern void ftrace_profile_disable(int event_id); +extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, + char *filter_str); +extern void ftrace_profile_free_filter(struct perf_event *event); +#endif + #endif /* _LINUX_FTRACE_EVENT_H */ diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 7b7fbf433cff..91a2b4309e7a 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -225,6 +225,7 @@ struct perf_counter_attr { #define PERF_COUNTER_IOC_RESET _IO ('$', 3) #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) #define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5) +#define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *) enum perf_counter_ioc_flags { PERF_IOC_FLAG_GROUP = 1U << 0, diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2e6d95f97419..df9d964c15fc 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -221,6 +221,7 @@ struct perf_event_attr { #define PERF_EVENT_IOC_RESET _IO ('$', 3) #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64) #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) +#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) enum perf_event_ioc_flags { PERF_IOC_FLAG_GROUP = 1U << 0, @@ -633,7 +634,12 @@ struct perf_event { struct pid_namespace *ns; u64 id; + +#ifdef CONFIG_EVENT_PROFILE + struct event_filter *filter; #endif + +#endif /* CONFIG_PERF_EVENTS */ }; /** diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 9d0b5c665883..12b5ec39bf97 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -1658,6 +1659,8 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) return ERR_PTR(err); } +static void perf_event_free_filter(struct perf_event *event); + static void free_event_rcu(struct rcu_head *head) { struct perf_event *event; @@ -1665,6 +1668,7 @@ static void free_event_rcu(struct rcu_head *head) event = 
container_of(head, struct perf_event, rcu_head); if (event->ns) put_pid_ns(event->ns); + perf_event_free_filter(event); kfree(event); } @@ -1974,7 +1978,8 @@ unlock: return ret; } -int perf_event_set_output(struct perf_event *event, int output_fd); +static int perf_event_set_output(struct perf_event *event, int output_fd); +static int perf_event_set_filter(struct perf_event *event, void __user *arg); static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -2002,6 +2007,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case PERF_EVENT_IOC_SET_OUTPUT: return perf_event_set_output(event, arg); + case PERF_EVENT_IOC_SET_FILTER: + return perf_event_set_filter(event, (void __user *)arg); + default: return -ENOTTY; } @@ -3806,9 +3814,14 @@ static int perf_swevent_is_counting(struct perf_event *event) return 1; } +static int perf_tp_event_match(struct perf_event *event, + struct perf_sample_data *data); + static int perf_swevent_match(struct perf_event *event, enum perf_type_id type, - u32 event_id, struct pt_regs *regs) + u32 event_id, + struct perf_sample_data *data, + struct pt_regs *regs) { if (!perf_swevent_is_counting(event)) return 0; @@ -3826,6 +3839,10 @@ static int perf_swevent_match(struct perf_event *event, return 0; } + if (event->attr.type == PERF_TYPE_TRACEPOINT && + !perf_tp_event_match(event, data)) + return 0; + return 1; } @@ -3842,7 +3859,7 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx, rcu_read_lock(); list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { - if (perf_swevent_match(event, type, event_id, regs)) + if (perf_swevent_match(event, type, event_id, data, regs)) perf_swevent_add(event, nr, nmi, data, regs); } rcu_read_unlock(); @@ -4086,6 +4103,7 @@ static const struct pmu perf_ops_task_clock = { }; #ifdef CONFIG_EVENT_PROFILE + void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size) { @@ -4109,8 +4127,15 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, } EXPORT_SYMBOL_GPL(perf_tp_event); -extern int ftrace_profile_enable(int); -extern void ftrace_profile_disable(int); +static int perf_tp_event_match(struct perf_event *event, + struct perf_sample_data *data) +{ + void *record = data->raw->data; + + if (likely(!event->filter) || filter_match_preds(event->filter, record)) + return 1; + return 0; +} static void tp_perf_event_destroy(struct perf_event *event) { @@ -4135,12 +4160,53 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event) return &perf_ops_generic; } + +static int perf_event_set_filter(struct perf_event *event, void __user *arg) +{ + char *filter_str; + int ret; + + if (event->attr.type != PERF_TYPE_TRACEPOINT) + return -EINVAL; + + filter_str = strndup_user(arg, PAGE_SIZE); + if (IS_ERR(filter_str)) + return PTR_ERR(filter_str); + + ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); + + kfree(filter_str); + return ret; +} + +static void perf_event_free_filter(struct perf_event *event) +{ + ftrace_profile_free_filter(event); +} + #else + +static int perf_tp_event_match(struct perf_event *event, + struct perf_sample_data *data) +{ + return 1; +} + static const struct pmu *tp_perf_event_init(struct perf_event *event) { return NULL; } -#endif + +static int perf_event_set_filter(struct perf_event *event, void __user *arg) +{ + return -ENOENT; +} + +static void perf_event_free_filter(struct perf_event *event) +{ +} + +#endif /* CONFIG_EVENT_PROFILE */ atomic_t 
perf_swevent_enabled[PERF_COUNT_SW_MAX]; @@ -4394,7 +4460,7 @@ err_size: goto out; } -int perf_event_set_output(struct perf_event *event, int output_fd) +static int perf_event_set_output(struct perf_event *event, int output_fd) { struct perf_event *output_event = NULL; struct file *output_file = NULL; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index ffe53ddbe67a..4959ada9e0bb 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -743,7 +743,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec, struct ring_buffer *buffer, struct ring_buffer_event *event) { - if (unlikely(call->filter_active) && !filter_match_preds(call, rec)) { + if (unlikely(call->filter_active) && + !filter_match_preds(call->filter, rec)) { ring_buffer_discard_commit(buffer, event); return 1; } diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 273845fce393..e27bb6acc2dd 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "trace.h" #include "trace_output.h" @@ -363,9 +364,8 @@ static void filter_build_regex(struct filter_pred *pred) } /* return 1 if event matches, 0 otherwise (discard) */ -int filter_match_preds(struct ftrace_event_call *call, void *rec) +int filter_match_preds(struct event_filter *filter, void *rec) { - struct event_filter *filter = call->filter; int match, top = 0, val1 = 0, val2 = 0; int stack[MAX_FILTER_PRED]; struct filter_pred *pred; @@ -538,9 +538,8 @@ static void filter_disable_preds(struct ftrace_event_call *call) filter->preds[i]->fn = filter_pred_none; } -void destroy_preds(struct ftrace_event_call *call) +static void __free_preds(struct event_filter *filter) { - struct event_filter *filter = call->filter; int i; if (!filter) @@ -553,21 +552,24 @@ void destroy_preds(struct ftrace_event_call *call) kfree(filter->preds); kfree(filter->filter_string); kfree(filter); +} + +void destroy_preds(struct ftrace_event_call *call) +{ + __free_preds(call->filter); call->filter = NULL; + call->filter_active = 0; } -static int init_preds(struct ftrace_event_call *call) +static struct event_filter *__alloc_preds(void) { struct event_filter *filter; struct filter_pred *pred; int i; - if (call->filter) - return 0; - - filter = call->filter = kzalloc(sizeof(*filter), GFP_KERNEL); - if (!call->filter) - return -ENOMEM; + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return ERR_PTR(-ENOMEM); filter->n_preds = 0; @@ -583,12 +585,24 @@ static int init_preds(struct ftrace_event_call *call) filter->preds[i] = pred; } - return 0; + return filter; oom: - destroy_preds(call); + __free_preds(filter); + return ERR_PTR(-ENOMEM); +} + +static int init_preds(struct ftrace_event_call *call) +{ + if (call->filter) + return 0; + + call->filter_active = 0; + call->filter = __alloc_preds(); + if (IS_ERR(call->filter)) + return PTR_ERR(call->filter); - return -ENOMEM; + return 0; } static int init_subsystem_preds(struct event_subsystem *system) @@ -629,10 +643,10 @@ static void filter_free_subsystem_preds(struct event_subsystem *system) static int filter_add_pred_fn(struct filter_parse_state *ps, struct ftrace_event_call *call, + struct event_filter *filter, struct filter_pred *pred, filter_pred_fn_t fn) { - struct event_filter *filter = call->filter; int idx, err; if (filter->n_preds == MAX_FILTER_PRED) { @@ -647,7 +661,6 @@ static int filter_add_pred_fn(struct filter_parse_state *ps, return err; filter->n_preds++; - call->filter_active 
= 1; return 0; } @@ -726,6 +739,7 @@ static filter_pred_fn_t select_comparison_fn(int op, int field_size, static int filter_add_pred(struct filter_parse_state *ps, struct ftrace_event_call *call, + struct event_filter *filter, struct filter_pred *pred, bool dry_run) { @@ -795,7 +809,7 @@ static int filter_add_pred(struct filter_parse_state *ps, add_pred_fn: if (!dry_run) - return filter_add_pred_fn(ps, call, pred, fn); + return filter_add_pred_fn(ps, call, filter, pred, fn); return 0; } @@ -1154,6 +1168,7 @@ static int check_preds(struct filter_parse_state *ps) } static int replace_preds(struct ftrace_event_call *call, + struct event_filter *filter, struct filter_parse_state *ps, char *filter_string, bool dry_run) @@ -1200,7 +1215,7 @@ static int replace_preds(struct ftrace_event_call *call, add_pred: if (!pred) return -ENOMEM; - err = filter_add_pred(ps, call, pred, dry_run); + err = filter_add_pred(ps, call, filter, pred, dry_run); filter_free_pred(pred); if (err) return err; @@ -1216,6 +1231,7 @@ static int replace_system_preds(struct event_subsystem *system, char *filter_string) { struct ftrace_event_call *call; + struct event_filter *filter; int err; bool fail = true; @@ -1228,17 +1244,19 @@ static int replace_system_preds(struct event_subsystem *system, continue; /* try to see if the filter can be applied */ - err = replace_preds(call, ps, filter_string, true); + err = replace_preds(call, filter, ps, filter_string, true); if (err) continue; /* really apply the filter */ filter_disable_preds(call); - err = replace_preds(call, ps, filter_string, false); + err = replace_preds(call, filter, ps, filter_string, false); if (err) filter_disable_preds(call); - else - replace_filter_string(call->filter, filter_string); + else { + call->filter_active = 1; + replace_filter_string(filter, filter_string); + } fail = false; } @@ -1252,7 +1270,6 @@ static int replace_system_preds(struct event_subsystem *system, int apply_event_filter(struct ftrace_event_call *call, char *filter_string) { int err; - struct filter_parse_state *ps; mutex_lock(&event_mutex); @@ -1283,10 +1300,11 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) goto out; } - err = replace_preds(call, ps, filter_string, false); + err = replace_preds(call, call->filter, ps, filter_string, false); if (err) append_filter_err(ps, call->filter); - + else + call->filter_active = 1; out: filter_opstack_clear(ps); postfix_clear(ps); @@ -1301,7 +1319,6 @@ int apply_subsystem_event_filter(struct event_subsystem *system, char *filter_string) { int err; - struct filter_parse_state *ps; mutex_lock(&event_mutex); @@ -1345,3 +1362,67 @@ out_unlock: return err; } +#ifdef CONFIG_EVENT_PROFILE + +void ftrace_profile_free_filter(struct perf_event *event) +{ + struct event_filter *filter = event->filter; + + event->filter = NULL; + __free_preds(filter); +} + +int ftrace_profile_set_filter(struct perf_event *event, int event_id, + char *filter_str) +{ + int err; + struct event_filter *filter; + struct filter_parse_state *ps; + struct ftrace_event_call *call = NULL; + + mutex_lock(&event_mutex); + + list_for_each_entry(call, &ftrace_events, list) { + if (call->id == event_id) + break; + } + if (!call) + return -EINVAL; + + if (event->filter) + return -EEXIST; + + filter = __alloc_preds(); + if (IS_ERR(filter)) + return PTR_ERR(filter); + + err = -ENOMEM; + ps = kzalloc(sizeof(*ps), GFP_KERNEL); + if (!ps) + goto free_preds; + + parse_init(ps, filter_ops, filter_str); + err = filter_parse(ps); + if (err) + goto free_ps; + + err = 
replace_preds(call, filter, ps, filter_str, false); + if (!err) + event->filter = filter; + +free_ps: + filter_opstack_clear(ps); + postfix_clear(ps); + kfree(ps); + +free_preds: + if (err) + __free_preds(filter); + + mutex_unlock(&event_mutex); + + return err; +} + +#endif /* CONFIG_EVENT_PROFILE */ + -- cgit v1.2.3 From c171b552a7d316c7e1c3ad6f70a30178dd53e14c Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 15 Oct 2009 11:22:07 +0800 Subject: perf trace: Add filter Suppport Add a new option "--filter " to perf record, and it should be right after "-e trace_point": #./perf record -R -f -e irq:irq_handler_entry --filter irq==18 ^C # ./perf trace perf-4303 ... irq_handler_entry: irq=18 handler=eth0 init-0 ... irq_handler_entry: irq=18 handler=eth0 init-0 ... irq_handler_entry: irq=18 handler=eth0 init-0 ... irq_handler_entry: irq=18 handler=eth0 init-0 ... irq_handler_entry: irq=18 handler=eth0 See Documentation/trace/events.txt for the syntax of filter expressions. Signed-off-by: Li Zefan Acked-by: Peter Zijlstra Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <4AD6955F.90602@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 15 ++++++++++++++- tools/perf/util/parse-events.c | 26 ++++++++++++++++++++++++-- tools/perf/util/parse-events.h | 2 ++ 3 files changed, 40 insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 4e3a374e7aa7..8b2c860c49a2 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -374,9 +374,11 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n static void create_counter(int counter, int cpu, pid_t pid) { + char *filter = filters[counter]; struct perf_event_attr *attr = attrs + counter; struct perf_header_attr *h_attr; int track = !counter; /* only the first counter needs these */ + int ret; struct { u64 count; u64 time_enabled; @@ -479,7 +481,6 @@ try_again: multiplex_fd = fd[nr_cpu][counter]; if (multiplex && fd[nr_cpu][counter] != multiplex_fd) { - int ret; ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd); assert(ret != -1); @@ -499,6 +500,16 @@ try_again: } } + if (filter != NULL) { + ret = ioctl(fd[nr_cpu][counter], + PERF_EVENT_IOC_SET_FILTER, filter); + if (ret) { + error("failed to set filter with %d (%s)\n", errno, + strerror(errno)); + exit(-1); + } + } + ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE); } @@ -676,6 +687,8 @@ static const struct option options[] = { OPT_CALLBACK('e', "event", NULL, "event", "event selector. 
use 'perf list' to list available events", parse_events), + OPT_CALLBACK(0, "filter", NULL, "filter", + "event filter", parse_filter), OPT_INTEGER('p', "pid", &target_pid, "record events on existing pid"), OPT_INTEGER('r', "realtime", &realtime_prio, diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 8cfb48cbbea0..b097570e9623 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -8,9 +8,10 @@ #include "cache.h" #include "header.h" -int nr_counters; +int nr_counters; struct perf_event_attr attrs[MAX_COUNTERS]; +char *filters[MAX_COUNTERS]; struct event_symbol { u8 type; @@ -708,7 +709,6 @@ static void store_event_type(const char *orgname) perf_header__push_event(id, orgname); } - int parse_events(const struct option *opt __used, const char *str, int unset __used) { struct perf_event_attr attr; @@ -745,6 +745,28 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u return 0; } +int parse_filter(const struct option *opt __used, const char *str, + int unset __used) +{ + int i = nr_counters - 1; + int len = strlen(str); + + if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) { + fprintf(stderr, + "-F option should follow a -e tracepoint option\n"); + return -1; + } + + filters[i] = malloc(len + 1); + if (!filters[i]) { + fprintf(stderr, "not enough memory to hold filter string\n"); + return -1; + } + strcpy(filters[i], str); + + return 0; +} + static const char * const event_type_descriptors[] = { "", "Hardware event", diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 8626a439033d..b8c1f64bc935 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -17,11 +17,13 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config); extern int nr_counters; extern struct perf_event_attr attrs[MAX_COUNTERS]; +extern char *filters[MAX_COUNTERS]; extern const char *event_name(int ctr); extern const char *__event_name(int type, u64 config); extern int parse_events(const struct option *opt, const char *str, int unset); +extern int parse_filter(const struct option *opt, const char *str, int unset); #define EVENTS_HELP_MAX (128*1024) -- cgit v1.2.3 From a66abe7fbf7805a1a02f241bd5283265ff6706ec Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 15 Oct 2009 12:24:04 +0200 Subject: tracing/events: Fix locking imbalance in the filter code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Américo Wang noticed that we have a locking imbalance in the error paths of ftrace_profile_set_filter(), causing potential leakage of event_mutex. Also clean up other error codepaths related to event_mutex while at it. Plus fix an initialized variable in the subsystem filter code. 
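The shape of the fix is the usual single-exit locking pattern: take the mutex on entry, send every failure path through one label, unlock there. A minimal sketch (names here are illustrative, not the exact kernel code):

        int set_filter_example(void)
        {
                int err;

                mutex_lock(&event_mutex);

                err = -EINVAL;
                if (!call)                      /* every failure jumps to the unlock site */
                        goto out_unlock;

                err = do_the_real_work();       /* placeholder for the parse/apply steps */

        out_unlock:
                mutex_unlock(&event_mutex);     /* one unlock covers success and all errors */
                return err;
        }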
Reported-by: Américo Wang Cc: Li Zefan Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <2375c9f90910150247u5ccb8e2at58c764e385ffa490@mail.gmail.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_events_filter.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index e27bb6acc2dd..21d34757b955 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1230,10 +1230,10 @@ static int replace_system_preds(struct event_subsystem *system, struct filter_parse_state *ps, char *filter_string) { + struct event_filter *filter = system->filter; struct ftrace_event_call *call; - struct event_filter *filter; - int err; bool fail = true; + int err; list_for_each_entry(call, &ftrace_events, list) { @@ -1262,7 +1262,7 @@ static int replace_system_preds(struct event_subsystem *system, if (fail) { parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); - return err; + return -EINVAL; } return 0; } @@ -1281,8 +1281,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) if (!strcmp(strstrip(filter_string), "0")) { filter_disable_preds(call); remove_filter_string(call->filter); - mutex_unlock(&event_mutex); - return 0; + goto out_unlock; } err = -ENOMEM; @@ -1330,8 +1329,7 @@ int apply_subsystem_event_filter(struct event_subsystem *system, if (!strcmp(strstrip(filter_string), "0")) { filter_free_subsystem_preds(system); remove_filter_string(system->filter); - mutex_unlock(&event_mutex); - return 0; + goto out_unlock; } err = -ENOMEM; @@ -1386,15 +1384,20 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id, if (call->id == event_id) break; } + + err = -EINVAL; if (!call) - return -EINVAL; + goto out_unlock; + err = -EEXIST; if (event->filter) - return -EEXIST; + goto out_unlock; filter = __alloc_preds(); - if (IS_ERR(filter)) - return PTR_ERR(filter); + if (IS_ERR(filter)) { + err = PTR_ERR(filter); + goto out_unlock; + } err = -ENOMEM; ps = kzalloc(sizeof(*ps), GFP_KERNEL); @@ -1419,6 +1422,7 @@ free_preds: if (err) __free_preds(filter); +out_unlock: mutex_unlock(&event_mutex); return err; -- cgit v1.2.3 From 434a83c3fbb951908a3a52040f7f0e0b8ba00dd0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 15 Oct 2009 11:50:39 +0200 Subject: events: Harmonize event field names and print output names Now that we can filter based on fields via perf record, people will start using filter expressions and will expect them to be obvious. The primary way to see which fields are available is by looking at the trace output, such as: gcc-18676 [000] 343.011728: irq_handler_entry: irq=0 handler=timer cc1-18677 [000] 343.012727: irq_handler_entry: irq=0 handler=timer cc1-18677 [000] 343.032692: irq_handler_entry: irq=0 handler=timer cc1-18677 [000] 343.033690: irq_handler_entry: irq=0 handler=timer cc1-18677 [000] 343.034687: irq_handler_entry: irq=0 handler=timer cc1-18677 [000] 343.035686: irq_handler_entry: irq=0 handler=timer cc1-18677 [000] 343.036684: irq_handler_entry: irq=0 handler=timer While 'irq==0' filters work, the 'handler==' filter expression does not work: $ perf record -R -f -a -e irq:irq_handler_entry --filter handler=timer sleep 1 Error: failed to set filter with 22 (Invalid argument) The problem is that while an 'irq' field exists and is recognized as a filter field - 'handler' does not exist - its name is 'name' in the output. 
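The mismatch comes from the event definition itself: filter expressions match on the TP_STRUCT__entry field names, while the text after 'handler=' is only a label in the TP_printk format string. Roughly, paraphrasing the irq_handler_entry definition (not a verbatim copy):

        TP_STRUCT__entry(
                __field(        int,    irq     )
                __string(       name,   action->name    )  /* the filterable field is "name"... */
        ),

        TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name))
                /* ...but the printout labels it "handler" */
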
To solve this, we need to synchronize the printout and the field names, wherever possible. In cases where the printout prints a non-field, we enclose that information in square brackets, such as: perf-1380 [013] 724.903505: softirq_exit: vec=9 [action=RCU] perf-1380 [013] 724.904482: softirq_exit: vec=1 [action=TIMER] This way users can use filter expressions more intuitively: all fields that show up as 'primary' (non-bracketed) information is filterable. This patch harmonizes the field names for all irq, bkl, power, sched and timer events. We might in fact think about dropping the print format bit of generic tracepoints altogether, and just print the fields that are being recorded. Cc: Li Zefan Cc: Tom Zanussi Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- include/trace/events/bkl.h | 18 +++++----- include/trace/events/irq.h | 8 ++--- include/trace/events/power.h | 2 -- include/trace/events/sched.h | 44 ++++++++++++------------ include/trace/events/timer.h | 79 ++++++++++++++++++++++---------------------- 5 files changed, 74 insertions(+), 77 deletions(-) diff --git a/include/trace/events/bkl.h b/include/trace/events/bkl.h index 8abd620a490e..1af72dc24278 100644 --- a/include/trace/events/bkl.h +++ b/include/trace/events/bkl.h @@ -13,7 +13,7 @@ TRACE_EVENT(lock_kernel, TP_ARGS(func, file, line), TP_STRUCT__entry( - __field( int, lock_depth ) + __field( int, depth ) __field_ext( const char *, func, FILTER_PTR_STRING ) __field_ext( const char *, file, FILTER_PTR_STRING ) __field( int, line ) @@ -21,13 +21,13 @@ TRACE_EVENT(lock_kernel, TP_fast_assign( /* We want to record the lock_depth after lock is acquired */ - __entry->lock_depth = current->lock_depth + 1; + __entry->depth = current->lock_depth + 1; __entry->func = func; __entry->file = file; __entry->line = line; ), - TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth, + TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth, __entry->file, __entry->line, __entry->func) ); @@ -38,20 +38,20 @@ TRACE_EVENT(unlock_kernel, TP_ARGS(func, file, line), TP_STRUCT__entry( - __field(int, lock_depth) - __field(const char *, func) - __field(const char *, file) - __field(int, line) + __field(int, depth ) + __field(const char *, func ) + __field(const char *, file ) + __field(int, line ) ), TP_fast_assign( - __entry->lock_depth = current->lock_depth; + __entry->depth = current->lock_depth; __entry->func = func; __entry->file = file; __entry->line = line; ), - TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth, + TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth, __entry->file, __entry->line, __entry->func) ); diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index b89f9db4a404..dcfcd4407623 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h @@ -48,7 +48,7 @@ TRACE_EVENT(irq_handler_entry, __assign_str(name, action->name); ), - TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name)) + TP_printk("irq=%d name=%s", __entry->irq, __get_str(name)) ); /** @@ -78,7 +78,7 @@ TRACE_EVENT(irq_handler_exit, __entry->ret = ret; ), - TP_printk("irq=%d return=%s", + TP_printk("irq=%d ret=%s", __entry->irq, __entry->ret ? 
"handled" : "unhandled") ); @@ -107,7 +107,7 @@ TRACE_EVENT(softirq_entry, __entry->vec = (int)(h - vec); ), - TP_printk("softirq=%d action=%s", __entry->vec, + TP_printk("vec=%d [action=%s]", __entry->vec, show_softirq_name(__entry->vec)) ); @@ -136,7 +136,7 @@ TRACE_EVENT(softirq_exit, __entry->vec = (int)(h - vec); ), - TP_printk("softirq=%d action=%s", __entry->vec, + TP_printk("vec=%d [action=%s]", __entry->vec, show_softirq_name(__entry->vec)) ); diff --git a/include/trace/events/power.h b/include/trace/events/power.h index ea6d579261ad..9bb96e5a2848 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -16,8 +16,6 @@ enum { }; #endif - - TRACE_EVENT(power_start, TP_PROTO(unsigned int type, unsigned int state), diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 4069c43f4187..b50b9856c59f 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -26,7 +26,7 @@ TRACE_EVENT(sched_kthread_stop, __entry->pid = t->pid; ), - TP_printk("task %s:%d", __entry->comm, __entry->pid) + TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid) ); /* @@ -46,7 +46,7 @@ TRACE_EVENT(sched_kthread_stop_ret, __entry->ret = ret; ), - TP_printk("ret %d", __entry->ret) + TP_printk("ret=%d", __entry->ret) ); /* @@ -73,7 +73,7 @@ TRACE_EVENT(sched_wait_task, __entry->prio = p->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); @@ -94,7 +94,7 @@ TRACE_EVENT(sched_wakeup, __field( pid_t, pid ) __field( int, prio ) __field( int, success ) - __field( int, cpu ) + __field( int, target_cpu ) ), TP_fast_assign( @@ -102,12 +102,12 @@ TRACE_EVENT(sched_wakeup, __entry->pid = p->pid; __entry->prio = p->prio; __entry->success = success; - __entry->cpu = task_cpu(p); + __entry->target_cpu = task_cpu(p); ), - TP_printk("task %s:%d [%d] success=%d [%03d]", + TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", __entry->comm, __entry->pid, __entry->prio, - __entry->success, __entry->cpu) + __entry->success, __entry->target_cpu) ); /* @@ -127,7 +127,7 @@ TRACE_EVENT(sched_wakeup_new, __field( pid_t, pid ) __field( int, prio ) __field( int, success ) - __field( int, cpu ) + __field( int, target_cpu ) ), TP_fast_assign( @@ -135,12 +135,12 @@ TRACE_EVENT(sched_wakeup_new, __entry->pid = p->pid; __entry->prio = p->prio; __entry->success = success; - __entry->cpu = task_cpu(p); + __entry->target_cpu = task_cpu(p); ), - TP_printk("task %s:%d [%d] success=%d [%03d]", + TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", __entry->comm, __entry->pid, __entry->prio, - __entry->success, __entry->cpu) + __entry->success, __entry->target_cpu) ); /* @@ -176,7 +176,7 @@ TRACE_EVENT(sched_switch, __entry->next_prio = next->prio; ), - TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]", + TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d", __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, __entry->prev_state ? 
__print_flags(__entry->prev_state, "|", @@ -211,7 +211,7 @@ TRACE_EVENT(sched_migrate_task, __entry->dest_cpu = dest_cpu; ), - TP_printk("task %s:%d [%d] from: %d to: %d", + TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d", __entry->comm, __entry->pid, __entry->prio, __entry->orig_cpu, __entry->dest_cpu) ); @@ -237,7 +237,7 @@ TRACE_EVENT(sched_process_free, __entry->prio = p->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); @@ -262,7 +262,7 @@ TRACE_EVENT(sched_process_exit, __entry->prio = p->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); @@ -287,7 +287,7 @@ TRACE_EVENT(sched_process_wait, __entry->prio = current->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); @@ -314,7 +314,7 @@ TRACE_EVENT(sched_process_fork, __entry->child_pid = child->pid; ), - TP_printk("parent %s:%d child %s:%d", + TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d", __entry->parent_comm, __entry->parent_pid, __entry->child_comm, __entry->child_pid) ); @@ -340,7 +340,7 @@ TRACE_EVENT(sched_signal_send, __entry->sig = sig; ), - TP_printk("sig: %d task %s:%d", + TP_printk("sig=%d comm=%s pid=%d", __entry->sig, __entry->comm, __entry->pid) ); @@ -374,7 +374,7 @@ TRACE_EVENT(sched_stat_wait, __perf_count(delay); ), - TP_printk("task: %s:%d wait: %Lu [ns]", + TP_printk("comm=%s pid=%d delay=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->delay) ); @@ -406,7 +406,7 @@ TRACE_EVENT(sched_stat_runtime, __perf_count(runtime); ), - TP_printk("task: %s:%d runtime: %Lu [ns], vruntime: %Lu [ns]", + TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->runtime, (unsigned long long)__entry->vruntime) @@ -437,7 +437,7 @@ TRACE_EVENT(sched_stat_sleep, __perf_count(delay); ), - TP_printk("task: %s:%d sleep: %Lu [ns]", + TP_printk("comm=%s pid=%d delay=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->delay) ); @@ -467,7 +467,7 @@ TRACE_EVENT(sched_stat_iowait, __perf_count(delay); ), - TP_printk("task: %s:%d iowait: %Lu [ns]", + TP_printk("comm=%s pid=%d delay=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->delay) ); diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 1844c48d640e..e5ce87a0498d 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -26,7 +26,7 @@ TRACE_EVENT(timer_init, __entry->timer = timer; ), - TP_printk("timer %p", __entry->timer) + TP_printk("timer=%p", __entry->timer) ); /** @@ -54,7 +54,7 @@ TRACE_EVENT(timer_start, __entry->now = jiffies; ), - TP_printk("timer %p: func %pf, expires %lu, timeout %ld", + TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]", __entry->timer, __entry->function, __entry->expires, (long)__entry->expires - __entry->now) ); @@ -81,7 +81,7 @@ TRACE_EVENT(timer_expire_entry, __entry->now = jiffies; ), - TP_printk("timer %p: now %lu", __entry->timer, __entry->now) + TP_printk("timer=%p now=%lu", __entry->timer, __entry->now) ); /** @@ -108,7 +108,7 @@ TRACE_EVENT(timer_expire_exit, __entry->timer = timer; ), - TP_printk("timer %p", __entry->timer) + TP_printk("timer=%p", __entry->timer) ); /** @@ -129,7 +129,7 @@ TRACE_EVENT(timer_cancel, __entry->timer = timer; ), - TP_printk("timer %p", __entry->timer) + TP_printk("timer=%p", __entry->timer) ); /** @@ -140,24 
+140,24 @@ TRACE_EVENT(timer_cancel, */ TRACE_EVENT(hrtimer_init, - TP_PROTO(struct hrtimer *timer, clockid_t clockid, + TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid, enum hrtimer_mode mode), - TP_ARGS(timer, clockid, mode), + TP_ARGS(hrtimer, clockid, mode), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) __field( clockid_t, clockid ) __field( enum hrtimer_mode, mode ) ), TP_fast_assign( - __entry->timer = timer; + __entry->hrtimer = hrtimer; __entry->clockid = clockid; __entry->mode = mode; ), - TP_printk("hrtimer %p, clockid %s, mode %s", __entry->timer, + TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer, __entry->clockid == CLOCK_REALTIME ? "CLOCK_REALTIME" : "CLOCK_MONOTONIC", __entry->mode == HRTIMER_MODE_ABS ? @@ -170,26 +170,26 @@ TRACE_EVENT(hrtimer_init, */ TRACE_EVENT(hrtimer_start, - TP_PROTO(struct hrtimer *timer), + TP_PROTO(struct hrtimer *hrtimer), - TP_ARGS(timer), + TP_ARGS(hrtimer), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) __field( void *, function ) __field( s64, expires ) __field( s64, softexpires ) ), TP_fast_assign( - __entry->timer = timer; - __entry->function = timer->function; - __entry->expires = hrtimer_get_expires(timer).tv64; - __entry->softexpires = hrtimer_get_softexpires(timer).tv64; + __entry->hrtimer = hrtimer; + __entry->function = hrtimer->function; + __entry->expires = hrtimer_get_expires(hrtimer).tv64; + __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64; ), - TP_printk("hrtimer %p, func %pf, expires %llu, softexpires %llu", - __entry->timer, __entry->function, + TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu", + __entry->hrtimer, __entry->function, (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->expires }), (unsigned long long)ktime_to_ns((ktime_t) { @@ -206,23 +206,22 @@ TRACE_EVENT(hrtimer_start, */ TRACE_EVENT(hrtimer_expire_entry, - TP_PROTO(struct hrtimer *timer, ktime_t *now), + TP_PROTO(struct hrtimer *hrtimer, ktime_t *now), - TP_ARGS(timer, now), + TP_ARGS(hrtimer, now), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) __field( s64, now ) ), TP_fast_assign( - __entry->timer = timer; - __entry->now = now->tv64; + __entry->hrtimer = hrtimer; + __entry->now = now->tv64; ), - TP_printk("hrtimer %p, now %llu", __entry->timer, - (unsigned long long)ktime_to_ns((ktime_t) { - .tv64 = __entry->now })) + TP_printk("hrtimer=%p now=%llu", __entry->hrtimer, + (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) ); /** @@ -234,40 +233,40 @@ TRACE_EVENT(hrtimer_expire_entry, */ TRACE_EVENT(hrtimer_expire_exit, - TP_PROTO(struct hrtimer *timer), + TP_PROTO(struct hrtimer *hrtimer), - TP_ARGS(timer), + TP_ARGS(hrtimer), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) ), TP_fast_assign( - __entry->timer = timer; + __entry->hrtimer = hrtimer; ), - TP_printk("hrtimer %p", __entry->timer) + TP_printk("hrtimer=%p", __entry->hrtimer) ); /** * hrtimer_cancel - called when the hrtimer is canceled - * @timer: pointer to struct hrtimer + * @hrtimer: pointer to struct hrtimer */ TRACE_EVENT(hrtimer_cancel, - TP_PROTO(struct hrtimer *timer), + TP_PROTO(struct hrtimer *hrtimer), - TP_ARGS(timer), + TP_ARGS(hrtimer), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) ), TP_fast_assign( - __entry->timer = timer; + __entry->hrtimer = hrtimer; ), - TP_printk("hrtimer %p", __entry->timer) + TP_printk("hrtimer=%p", __entry->hrtimer) ); /** @@ -302,7 +301,7 @@ 
TRACE_EVENT(itimer_state, __entry->interval_usec = value->it_interval.tv_usec; ), - TP_printk("which %d, expires %lu, it_value %lu.%lu, it_interval %lu.%lu", + TP_printk("which=%d expires=%lu it_value=%lu.%lu it_interval=%lu.%lu", __entry->which, __entry->expires, __entry->value_sec, __entry->value_usec, __entry->interval_sec, __entry->interval_usec) @@ -332,7 +331,7 @@ TRACE_EVENT(itimer_expire, __entry->pid = pid_nr(pid); ), - TP_printk("which %d, pid %d, now %lu", __entry->which, + TP_printk("which=%d pid=%d now=%lu", __entry->which, (int) __entry->pid, __entry->now) ); -- cgit v1.2.3 From 210f9cb2cf2effca690271085f4bd6a3ea286e6c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 16 Oct 2009 10:34:28 +0200 Subject: perf tools: Bump version to 0.0.2 We released the first version of perf with 0.0.1 in v2.6.31, time to double our version number to 0.0.2 ;-) Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/util/PERF-VERSION-GEN | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN index c561d1538c03..54552a00a117 100755 --- a/tools/perf/util/PERF-VERSION-GEN +++ b/tools/perf/util/PERF-VERSION-GEN @@ -1,7 +1,7 @@ #!/bin/sh GVF=PERF-VERSION-FILE -DEF_VER=v0.0.1.PERF +DEF_VER=v0.0.2.PERF LF=' ' -- cgit v1.2.3 From f39cdf25bf77219676ec5360980ac40b1a7e144a Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 17 Oct 2009 08:43:17 +0200 Subject: perf tools: Move dereference after NULL test In each case, if the NULL test on thread is needed, then the dereference should be after the NULL test. A simplified version of the semantic match that detects this problem is as follows (http://coccinelle.lip6.fr/): // @match exists@ expression x, E; identifier fld; @@ * x->fld ... when != \(x = E\|&x\) * x == NULL // Signed-off-by: Julia Lawall LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 4 ++-- tools/perf/builtin-report.c | 4 ++-- tools/perf/builtin-sched.c | 4 ++-- tools/perf/builtin-trace.c | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 3fe0de03004d..56ba71658d70 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -104,14 +104,14 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) event->ip.pid, (void *)(long)ip); - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - if (thread == NULL) { fprintf(stderr, "problem processing %d event, skipping it.\n", event->header.type); return -1; } + dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + if (event->header.misc & PERF_RECORD_MISC_KERNEL) { level = 'k'; sym = kernel_maps__find_symbol(ip, &map); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 015c79745966..a4f8cc209151 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -629,14 +629,14 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) } } - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - if (thread == NULL) { eprintf("problem processing %d event, skipping it.\n", event->header.type); return -1; } + dump_printf(" ... 
thread: %s:%d\n", thread->comm, thread->pid); + if (comm_list && !strlist__has_entry(comm_list, thread->comm)) return 0; diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index c9c68563e964..57ad3f458ef5 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1667,14 +1667,14 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) (void *)(long)ip, (long long)period); - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - if (thread == NULL) { eprintf("problem processing %d event, skipping it.\n", event->header.type); return -1; } + dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + if (profile_cpu != -1 && profile_cpu != (int) cpu) return 0; diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index ce8459ac2845..4c129ff0bb16 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -80,14 +80,14 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) (void *)(long)ip, (long long)period); - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - if (thread == NULL) { eprintf("problem processing %d event, skipping it.\n", event->header.type); return -1; } + dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + if (sample_type & PERF_SAMPLE_RAW) { struct { u32 size; -- cgit v1.2.3 From 11018201b831e19304c0d639f105ad6c27e120b1 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 18 Oct 2009 22:29:23 +1100 Subject: perf stat: Add branch performance metric When we count both branches and branch-misses it is useful to print out the percentage of branch-misses: # perf stat -e branches -e branch-misses /bin/true Performance counter stats for '/bin/true': 401684 branches # 0.000 M/sec 23301 branch-misses # 5.801 % Signed-off-by: Anton Blanchard Cc: paulus@samba.org Cc: a.p.zijlstra@chello.nl LKML-Reference: <20091018112923.GQ4808@kryten> Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 3db31e7bf173..c37368343fff 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -125,6 +125,7 @@ struct stats event_res_stats[MAX_COUNTERS][3]; struct stats runtime_nsecs_stats; struct stats walltime_nsecs_stats; struct stats runtime_cycles_stats; +struct stats runtime_branches_stats; #define MATCH_EVENT(t, c, counter) \ (attrs[counter].type == PERF_TYPE_##t && \ @@ -235,6 +236,8 @@ static void read_counter(int counter) update_stats(&runtime_nsecs_stats, count[0]); if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter)) update_stats(&runtime_cycles_stats, count[0]); + if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter)) + update_stats(&runtime_branches_stats, count[0]); } static int run_perf_stat(int argc __used, const char **argv) @@ -352,6 +355,14 @@ static void abs_printout(int counter, double avg) ratio = avg / total; fprintf(stderr, " # %10.3f IPC ", ratio); + } else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter)) { + total = avg_stats(&runtime_branches_stats); + + if (total) + ratio = avg * 100 / total; + + fprintf(stderr, " # %10.3f %% ", ratio); + } else { total = avg_stats(&runtime_nsecs_stats); -- cgit v1.2.3 From 5a116dd2797677cad48fee2f42267e3cb69f5502 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 17 Oct 2009 17:12:33 +0200 Subject: perf tools: Use kernel bitmap library Use the kernel bitmap library for internal perf tools uses. 
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Steven Rostedt LKML-Reference: <1255792354-11304-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 16 +++++++++ tools/perf/builtin-record.c | 3 -- tools/perf/builtin-sched.c | 2 -- tools/perf/util/include/asm/bitops.h | 6 ++++ tools/perf/util/include/asm/byteorder.h | 2 ++ tools/perf/util/include/asm/swab.h | 1 + tools/perf/util/include/asm/types.h | 17 +++++++++ tools/perf/util/include/asm/uaccess.h | 14 ++++++++ tools/perf/util/include/linux/bitmap.h | 2 ++ tools/perf/util/include/linux/bitops.h | 27 +++++++++++++++ tools/perf/util/include/linux/compiler.h | 10 ++++++ tools/perf/util/include/linux/ctype.h | 1 + tools/perf/util/include/linux/kernel.h | 59 ++++++++++++++++++++++++++++++++ tools/perf/util/include/linux/types.h | 1 + 14 files changed, 156 insertions(+), 5 deletions(-) create mode 100644 tools/perf/util/include/asm/bitops.h create mode 100644 tools/perf/util/include/asm/byteorder.h create mode 100644 tools/perf/util/include/asm/swab.h create mode 100644 tools/perf/util/include/asm/types.h create mode 100644 tools/perf/util/include/asm/uaccess.h create mode 100644 tools/perf/util/include/linux/bitmap.h create mode 100644 tools/perf/util/include/linux/bitops.h create mode 100644 tools/perf/util/include/linux/compiler.h create mode 100644 tools/perf/util/include/linux/ctype.h create mode 100644 tools/perf/util/include/linux/types.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 106c15055b50..2400e5068a2f 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -363,6 +363,9 @@ LIB_OBJS += util/parse-options.o LIB_OBJS += util/parse-events.o LIB_OBJS += util/path.o LIB_OBJS += util/rbtree.o +LIB_OBJS += util/bitmap.o +LIB_OBJS += util/hweight.o +LIB_OBJS += util/find_next_bit.o LIB_OBJS += util/run-command.o LIB_OBJS += util/quote.o LIB_OBJS += util/strbuf.o @@ -790,6 +793,19 @@ util/config.o: util/config.c PERF-CFLAGS util/rbtree.o: ../../lib/rbtree.c PERF-CFLAGS $(QUIET_CC)$(CC) -o util/rbtree.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< +# some perf warning policies can't fit to lib/bitmap.c, eg: it warns about variable shadowing +# from that comes from kernel headers wrapping. 
+KBITMAP_FLAGS=`echo $(ALL_CFLAGS) | sed s/-Wshadow// | sed s/-Wswitch-default// | sed s/-Wextra//` + +util/bitmap.o: ../../lib/bitmap.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o util/bitmap.o -c $(KBITMAP_FLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< + +util/hweight.o: ../../lib/hweight.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o util/hweight.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< + +util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< + perf-%$X: %.o $(PERFLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 8b2c860c49a2..fc3709cba136 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -21,9 +21,6 @@ #include #include -#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) -#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) - static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static long default_interval = 0; diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 57ad3f458ef5..807ca66e7a8c 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -38,8 +38,6 @@ static int cwdlen; #define PR_SET_NAME 15 /* Set process name */ #define MAX_CPUS 4096 -#define BUG_ON(x) assert(!(x)) - static u64 run_measurement_overhead; static u64 sleep_measurement_overhead; diff --git a/tools/perf/util/include/asm/bitops.h b/tools/perf/util/include/asm/bitops.h new file mode 100644 index 000000000000..fbe4d9212919 --- /dev/null +++ b/tools/perf/util/include/asm/bitops.h @@ -0,0 +1,6 @@ +#include "../../../../include/asm-generic/bitops/__fls.h" +#include "../../../../include/asm-generic/bitops/fls.h" +#include "../../../../include/asm-generic/bitops/fls64.h" +#include "../../../../include/asm-generic/bitops/__ffs.h" +#include "../../../../include/asm-generic/bitops/ffz.h" +#include "../../../../include/asm-generic/bitops/hweight.h" diff --git a/tools/perf/util/include/asm/byteorder.h b/tools/perf/util/include/asm/byteorder.h new file mode 100644 index 000000000000..39f367cfaf56 --- /dev/null +++ b/tools/perf/util/include/asm/byteorder.h @@ -0,0 +1,2 @@ +#include "../asm/types.h" +#include "../../../../include/linux/swab.h" diff --git a/tools/perf/util/include/asm/swab.h b/tools/perf/util/include/asm/swab.h new file mode 100644 index 000000000000..ed538942523d --- /dev/null +++ b/tools/perf/util/include/asm/swab.h @@ -0,0 +1 @@ +/* stub */ diff --git a/tools/perf/util/include/asm/types.h b/tools/perf/util/include/asm/types.h new file mode 100644 index 000000000000..06703c6cd50e --- /dev/null +++ b/tools/perf/util/include/asm/types.h @@ -0,0 +1,17 @@ +#ifndef PERF_ASM_TYPES_H_ +#define PERF_ASM_TYPES_H_ + +#include +#include "../../types.h" +#include + +/* CHECKME: Not sure both always match */ +#define BITS_PER_LONG __WORDSIZE + +typedef u64 __u64; +typedef u32 __u32; +typedef u16 __u16; +typedef u8 __u8; +typedef s64 __s64; + +#endif /* PERF_ASM_TYPES_H_ */ diff --git a/tools/perf/util/include/asm/uaccess.h b/tools/perf/util/include/asm/uaccess.h new file mode 100644 index 000000000000..d0f72b8fcc35 --- /dev/null +++ b/tools/perf/util/include/asm/uaccess.h @@ -0,0 +1,14 @@ +#ifndef _PERF_ASM_UACCESS_H_ +#define _PERF_ASM_UACCESS_H_ + +#define __get_user(src, dest) \ +({ \ + (src) = *dest; \ + 0; \ +}) + +#define get_user __get_user + +#define access_ok(type, addr, size) 1 + +#endif diff --git 
a/tools/perf/util/include/linux/bitmap.h b/tools/perf/util/include/linux/bitmap.h new file mode 100644 index 000000000000..821c1033bccf --- /dev/null +++ b/tools/perf/util/include/linux/bitmap.h @@ -0,0 +1,2 @@ +#include "../../../../include/linux/bitmap.h" +#include "../../../../include/asm-generic/bitops/find.h" diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h new file mode 100644 index 000000000000..ace57c36d1d0 --- /dev/null +++ b/tools/perf/util/include/linux/bitops.h @@ -0,0 +1,27 @@ +#ifndef _PERF_LINUX_BITOPS_H_ +#define _PERF_LINUX_BITOPS_H_ + +#define __KERNEL__ + +#define CONFIG_GENERIC_FIND_NEXT_BIT +#define CONFIG_GENERIC_FIND_FIRST_BIT +#include "../../../../include/linux/bitops.h" + +static inline void set_bit(int nr, unsigned long *addr) +{ + addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); +} + +static __always_inline int test_bit(unsigned int nr, const unsigned long *addr) +{ + return ((1UL << (nr % BITS_PER_LONG)) & + (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; +} + +unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); + +unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); + +#endif diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h new file mode 100644 index 000000000000..dfb0713ed47f --- /dev/null +++ b/tools/perf/util/include/linux/compiler.h @@ -0,0 +1,10 @@ +#ifndef _PERF_LINUX_COMPILER_H_ +#define _PERF_LINUX_COMPILER_H_ + +#ifndef __always_inline +#define __always_inline inline +#endif +#define __user +#define __attribute_const__ + +#endif diff --git a/tools/perf/util/include/linux/ctype.h b/tools/perf/util/include/linux/ctype.h new file mode 100644 index 000000000000..bae5783282ef --- /dev/null +++ b/tools/perf/util/include/linux/ctype.h @@ -0,0 +1 @@ +#include "../../../../include/linux/ctype.h" diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h index a6b87390cb52..4b9204d9b266 100644 --- a/tools/perf/util/include/linux/kernel.h +++ b/tools/perf/util/include/linux/kernel.h @@ -1,6 +1,16 @@ #ifndef PERF_LINUX_KERNEL_H_ #define PERF_LINUX_KERNEL_H_ +#include +#include +#include +#include + +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) + +#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) +#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) + #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif @@ -26,4 +36,53 @@ _max1 > _max2 ? _max1 : _max2; }) #endif +#ifndef min +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) +#endif + +#ifndef BUG_ON +#define BUG_ON(cond) assert(!(cond)) +#endif + +/* + * Both need more care to handle endianness + * (Don't use bitmap_copy_le() for now) + */ +#define cpu_to_le64(x) (x) +#define cpu_to_le32(x) (x) + +static inline int +vscnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int i; + ssize_t ssize = size; + + i = vsnprintf(buf, size, fmt, args); + + return (i >= ssize) ? (ssize - 1) : i; +} + +static inline int scnprintf(char * buf, size_t size, const char * fmt, ...) +{ + va_list args; + ssize_t ssize = size; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + + return (i >= ssize) ? 
(ssize - 1) : i; +} + +static inline unsigned long +simple_strtoul(const char *nptr, char **endptr, int base) +{ + return strtoul(nptr, endptr, base); +} + #endif diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h new file mode 100644 index 000000000000..ed538942523d --- /dev/null +++ b/tools/perf/util/include/linux/types.h @@ -0,0 +1 @@ +/* stub */ -- cgit v1.2.3 From 2ba0825075e76236d22a20decd8e2346a99faabe Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 17 Oct 2009 17:12:34 +0200 Subject: perf tools: Introduce bitmask'ed additional headers This provides a new set of bitmasked headers. A new field is added in the perf headers that implements a bitmap storing optional features present in the perf.data file. The layout can be pictured like this: (Usual perf headers)(Features bitmap)[Feature 0][Feature n][Feature 255] If the bit n is set, then the feature n is used in this file. They are all set in order. This brings a backward and forward compatibility. The trace_info section has moved into such optional features, this is the first and only one for now. This is backward compatible with the .32 file version although it doesn't support the previous separate trace.info file. And finally it doesn't support the current interim development version. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Steven Rostedt LKML-Reference: <1255792354-11304-2-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 4 +- tools/perf/util/header.c | 100 +++++++++++++++++++++++++------------------- tools/perf/util/header.h | 30 ++++++++----- 3 files changed, 78 insertions(+), 56 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index fc3709cba136..f0467ff0d8ad 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -574,11 +574,11 @@ static int __cmd_record(int argc, const char **argv) header = perf_header__new(); if (raw_samples) { - perf_header__set_trace_info(); + perf_header__feat_trace_info(header); } else { for (i = 0; i < nr_counters; i++) { if (attrs[i].sample_type & PERF_SAMPLE_RAW) { - perf_header__set_trace_info(); + perf_header__feat_trace_info(header); break; } } diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 9aae360c0f28..171d51b6f359 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -8,6 +8,8 @@ #include "../perf.h" #include "trace-event.h" +#include + /* * Create new perf.data header attribute: */ @@ -48,25 +50,17 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id) */ struct perf_header *perf_header__new(void) { - struct perf_header *self = malloc(sizeof(*self)); + struct perf_header *self = calloc(sizeof(*self), 1); if (!self) die("nomem"); - self->frozen = 0; - - self->attrs = 0; self->size = 1; self->attr = malloc(sizeof(void *)); if (!self->attr) die("nomem"); - self->data_offset = 0; - self->data_size = 0; - self->trace_info_offset = 0; - self->trace_info_size = 0; - return self; } @@ -149,14 +143,12 @@ struct perf_file_header { struct perf_file_section attrs; struct perf_file_section data; struct perf_file_section event_types; - struct perf_file_section trace_info; + feat_mask_t adds_features; }; -static int trace_info; - -void perf_header__set_trace_info(void) +void perf_header__feat_trace_info(struct perf_header *header) { - trace_info = 1; + set_bit(HEADER_TRACE_INFO, perf_header__adds_mask(header)); } 
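A reader of such a file walks the bitmap and, for each bit that is set, consumes one section descriptor; schematically (simplified, using the perf_file_section layout and helpers named in the patch):

        struct perf_file_section { u64 offset; u64 size; };

        for (feat = 0; feat < HEADER_FEAT_BITS; feat++) {
                struct perf_file_section sec;

                if (!test_bit(feat, feat_mask))
                        continue;                       /* feature absent: no section was written */

                do_read(fd, &sec, sizeof(sec));         /* descriptors are stored back to back... */
                /* ...and each payload lives at sec.offset, sec.size bytes long */
        }

The descriptors are consumed in ascending bit order, matching the "set in order" rule described above.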
static void do_write(int fd, void *buf, size_t size) @@ -172,6 +164,32 @@ static void do_write(int fd, void *buf, size_t size) } } +static void perf_header__adds_write(struct perf_header *self, int fd) +{ + struct perf_file_section trace_sec; + u64 cur_offset = lseek(fd, 0, SEEK_CUR); + unsigned long *feat_mask = perf_header__adds_mask(self); + + if (test_bit(HEADER_TRACE_INFO, feat_mask)) { + /* Write trace info */ + trace_sec.offset = lseek(fd, sizeof(trace_sec), SEEK_CUR); + read_tracing_data(fd, attrs, nr_counters); + trace_sec.size = lseek(fd, 0, SEEK_CUR) - trace_sec.offset; + + /* Write trace info headers */ + lseek(fd, cur_offset, SEEK_SET); + do_write(fd, &trace_sec, sizeof(trace_sec)); + + /* + * Update cur_offset. So that other (future) + * features can set their own infos in this place. But if we are + * the only feature, at least that seeks to the place the data + * should begin. + */ + cur_offset = lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET); + } +}; + void perf_header__write(struct perf_header *self, int fd) { struct perf_file_header f_header; @@ -210,23 +228,7 @@ void perf_header__write(struct perf_header *self, int fd) if (events) do_write(fd, events, self->event_size); - if (trace_info) { - static int trace_info_written; - - /* - * Write it only once - */ - if (!trace_info_written) { - self->trace_info_offset = lseek(fd, 0, SEEK_CUR); - read_tracing_data(fd, attrs, nr_counters); - self->trace_info_size = lseek(fd, 0, SEEK_CUR) - - self->trace_info_offset; - trace_info_written = 1; - } else { - lseek(fd, self->trace_info_offset + - self->trace_info_size, SEEK_SET); - } - } + perf_header__adds_write(self, fd); self->data_offset = lseek(fd, 0, SEEK_CUR); @@ -246,12 +248,10 @@ void perf_header__write(struct perf_header *self, int fd) .offset = self->event_offset, .size = self->event_size, }, - .trace_info = { - .offset = self->trace_info_offset, - .size = self->trace_info_size, - }, }; + memcpy(&f_header.adds_features, &self->adds_features, sizeof(feat_mask_t)); + lseek(fd, 0, SEEK_SET); do_write(fd, &f_header, sizeof(f_header)); lseek(fd, self->data_offset + self->data_size, SEEK_SET); @@ -274,6 +274,20 @@ static void do_read(int fd, void *buf, size_t size) } } +static void perf_header__adds_read(struct perf_header *self, int fd) +{ + const unsigned long *feat_mask = perf_header__adds_mask(self); + + if (test_bit(HEADER_TRACE_INFO, feat_mask)) { + struct perf_file_section trace_sec; + + do_read(fd, &trace_sec, sizeof(trace_sec)); + lseek(fd, trace_sec.offset, SEEK_SET); + trace_report(fd); + lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET); + } +}; + struct perf_header *perf_header__read(int fd) { struct perf_header *self = perf_header__new(); @@ -292,9 +306,11 @@ struct perf_header *perf_header__read(int fd) if (f_header.size != sizeof(f_header)) { /* Support the previous format */ - if (f_header.size == offsetof(typeof(f_header), trace_info)) - f_header.trace_info.size = 0; - else + if (f_header.size == offsetof(typeof(f_header), adds_features)) { + unsigned long *mask = (unsigned long *)(void *) + &f_header.adds_features; + bitmap_zero(mask, HEADER_FEAT_BITS); + } else die("incompatible file format"); } nr_attrs = f_header.attrs.size / sizeof(f_attr); @@ -330,13 +346,9 @@ struct perf_header *perf_header__read(int fd) event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } - self->trace_info_offset = f_header.trace_info.offset; - self->trace_info_size = f_header.trace_info.size; + memcpy(&self->adds_features, 
&f_header.adds_features, sizeof(feat_mask_t)); - if (self->trace_info_size) { - lseek(fd, self->trace_info_offset, SEEK_SET); - trace_report(fd); - } + perf_header__adds_read(self, fd); self->event_offset = f_header.event_types.offset; self->event_size = f_header.event_types.size; diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 30aee5160dc0..0eb4a9126b7c 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -12,19 +12,29 @@ struct perf_header_attr { off_t id_offset; }; +#define HEADER_TRACE_INFO 1 + +#define HEADER_FEAT_BITS 256 + +typedef typeof(u64[HEADER_FEAT_BITS / 8]) feat_mask_t; + struct perf_header { - int frozen; - int attrs, size; + int frozen; + int attrs, size; struct perf_header_attr **attr; - s64 attr_offset; - u64 data_offset; - u64 data_size; - u64 event_offset; - u64 event_size; - u64 trace_info_offset; - u64 trace_info_size; + s64 attr_offset; + u64 data_offset; + u64 data_size; + u64 event_offset; + u64 event_size; + feat_mask_t adds_features; }; +static inline unsigned long *perf_header__adds_mask(struct perf_header *self) +{ + return (unsigned long *)(void *)&self->adds_features; +} + struct perf_header *perf_header__read(int fd); void perf_header__write(struct perf_header *self, int fd); @@ -42,7 +52,7 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); u64 perf_header__sample_type(struct perf_header *header); struct perf_event_attr * perf_header__find_attr(u64 id, struct perf_header *header); -void perf_header__set_trace_info(void); +void perf_header__feat_trace_info(struct perf_header *header); struct perf_header *perf_header__new(void); -- cgit v1.2.3 From db9f11e36d0125a5e3e595ea9ef2e4b89f7e8737 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 17 Oct 2009 17:57:18 +0200 Subject: perf tools: Use DECLARE_BITMAP instead of an open-coded array Use DECLARE_BITMAP instead of an open coded array for our bitmap of featured sections. This makes the array an unsigned long instead of a u64 but since we use a 256 bits bitmap, the array size shouldn't vary between different boxes. 
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Steven Rostedt LKML-Reference: <1255795038-13751-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/header.c | 22 +++++++++------------- tools/perf/util/header.h | 11 +++-------- tools/perf/util/include/asm/asm-offsets.h | 1 + tools/perf/util/include/linux/types.h | 8 +++++++- 4 files changed, 20 insertions(+), 22 deletions(-) create mode 100644 tools/perf/util/include/asm/asm-offsets.h diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 171d51b6f359..622c60e45254 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -8,8 +8,6 @@ #include "../perf.h" #include "trace-event.h" -#include - /* * Create new perf.data header attribute: */ @@ -143,12 +141,12 @@ struct perf_file_header { struct perf_file_section attrs; struct perf_file_section data; struct perf_file_section event_types; - feat_mask_t adds_features; + DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); }; void perf_header__feat_trace_info(struct perf_header *header) { - set_bit(HEADER_TRACE_INFO, perf_header__adds_mask(header)); + set_bit(HEADER_TRACE_INFO, header->adds_features); } static void do_write(int fd, void *buf, size_t size) @@ -168,7 +166,7 @@ static void perf_header__adds_write(struct perf_header *self, int fd) { struct perf_file_section trace_sec; u64 cur_offset = lseek(fd, 0, SEEK_CUR); - unsigned long *feat_mask = perf_header__adds_mask(self); + unsigned long *feat_mask = self->adds_features; if (test_bit(HEADER_TRACE_INFO, feat_mask)) { /* Write trace info */ @@ -250,7 +248,7 @@ void perf_header__write(struct perf_header *self, int fd) }, }; - memcpy(&f_header.adds_features, &self->adds_features, sizeof(feat_mask_t)); + memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features)); lseek(fd, 0, SEEK_SET); do_write(fd, &f_header, sizeof(f_header)); @@ -276,7 +274,7 @@ static void do_read(int fd, void *buf, size_t size) static void perf_header__adds_read(struct perf_header *self, int fd) { - const unsigned long *feat_mask = perf_header__adds_mask(self); + const unsigned long *feat_mask = self->adds_features; if (test_bit(HEADER_TRACE_INFO, feat_mask)) { struct perf_file_section trace_sec; @@ -306,11 +304,9 @@ struct perf_header *perf_header__read(int fd) if (f_header.size != sizeof(f_header)) { /* Support the previous format */ - if (f_header.size == offsetof(typeof(f_header), adds_features)) { - unsigned long *mask = (unsigned long *)(void *) - &f_header.adds_features; - bitmap_zero(mask, HEADER_FEAT_BITS); - } else + if (f_header.size == offsetof(typeof(f_header), adds_features)) + bitmap_zero(f_header.adds_features, HEADER_FEAT_BITS); + else die("incompatible file format"); } nr_attrs = f_header.attrs.size / sizeof(f_attr); @@ -346,7 +342,7 @@ struct perf_header *perf_header__read(int fd) event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } - memcpy(&self->adds_features, &f_header.adds_features, sizeof(feat_mask_t)); + memcpy(&self->adds_features, &f_header.adds_features, sizeof(f_header.adds_features)); perf_header__adds_read(self, fd); diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 0eb4a9126b7c..2ea9dfb1236a 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -5,6 +5,8 @@ #include #include "types.h" +#include + struct perf_header_attr { struct perf_event_attr attr; int ids, size; @@ -16,8 +18,6 @@ struct perf_header_attr { #define 
HEADER_FEAT_BITS 256 -typedef typeof(u64[HEADER_FEAT_BITS / 8]) feat_mask_t; - struct perf_header { int frozen; int attrs, size; @@ -27,14 +27,9 @@ struct perf_header { u64 data_size; u64 event_offset; u64 event_size; - feat_mask_t adds_features; + DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); }; -static inline unsigned long *perf_header__adds_mask(struct perf_header *self) -{ - return (unsigned long *)(void *)&self->adds_features; -} - struct perf_header *perf_header__read(int fd); void perf_header__write(struct perf_header *self, int fd); diff --git a/tools/perf/util/include/asm/asm-offsets.h b/tools/perf/util/include/asm/asm-offsets.h new file mode 100644 index 000000000000..ed538942523d --- /dev/null +++ b/tools/perf/util/include/asm/asm-offsets.h @@ -0,0 +1 @@ +/* stub */ diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h index ed538942523d..858a38d08435 100644 --- a/tools/perf/util/include/linux/types.h +++ b/tools/perf/util/include/linux/types.h @@ -1 +1,7 @@ -/* stub */ +#ifndef _PERF_LINUX_TYPES_H_ +#define _PERF_LINUX_TYPES_H_ + +#define DECLARE_BITMAP(name,bits) \ + unsigned long name[BITS_TO_LONGS(bits)] + +#endif -- cgit v1.2.3 From dc79959aaf80e518741657a702fa2727c86c1189 Mon Sep 17 00:00:00 2001 From: Tim Blechmann Date: Sat, 17 Oct 2009 18:08:29 +0200 Subject: perf top: Fix --delay_secs 0 division by zero Add delay_secs sanity check to handle_keypress, this fixes a division by zero crash. Signed-off-by: Tim Blechmann Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <4AD9EBFD.106@klingt.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 37512e936235..a1b1d10912dc 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -686,6 +686,8 @@ static void handle_keypress(int c) switch (c) { case 'd': prompt_integer(&delay_secs, "Enter display delay"); + if (delay_secs < 1) + delay_secs = 1; break; case 'e': prompt_integer(&print_entries, "Enter display entries (lines)"); -- cgit v1.2.3 From 1abc7f5500fff8422f34826a006648d8741d83d3 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 18 Oct 2009 19:20:24 -0700 Subject: perf tools: Display better error messages on missing packages Check for libelf headers and glibc headers separately so that the error message correctly identifies which package installation is missing/needed. 
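For illustration, the separate glibc check boils down to asking the compiler to build a probe program like the sketch below (it mirrors the one-liner embedded in the Makefile hunk further down); if this fails to compile, the glibc development headers are the missing piece, and the error message can now say so instead of blaming libelf:

/*
 * Approximation of the probe piped into $(CC) by the Makefile; only
 * whether it compiles and links matters, the return value is irrelevant.
 */
#include <gnu/libc-version.h>

int main(void)
{
	const char *version = gnu_get_libc_version();
	return (long)version;
}
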
Signed-off-by: Randy Dunlap Cc: paulus@samba.org Cc: a.p.zijlstra@chello.nl Cc: efault@gmx.de Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <4ADBCCE8.3060300@oracle.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 2400e5068a2f..db89a6de9d06 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -431,8 +431,12 @@ ifeq ($(uname_S),Darwin) PTHREAD_LIBS = endif +ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) + msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]); +endif + ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) - msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); + msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel); endif ifdef NO_DEMANGLE -- cgit v1.2.3 From 12133afffcc7140eea915b1572189a2ea0cf7b0e Mon Sep 17 00:00:00 2001 From: Tim Blechmann Date: Mon, 19 Oct 2009 12:03:33 +0200 Subject: perf stat: Add branch performance events to default output Adds performance event information about branches and branch misses to the default output of perf stat. Signed-off-by: Tim Blechmann Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <4ADC3975.8050109@klingt.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index c37368343fff..95a55eaf72f5 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -59,6 +59,8 @@ static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES}, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS}, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, }; -- cgit v1.2.3 From 56aab464ff6232bcc2f53b26576983dc83f75db7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 19 Oct 2009 13:27:08 +0200 Subject: perf stat: Re-align the default_attrs[] array Clean up the array definition to be vertically aligned. No functional effects. 
Cc: Tim Blechmann Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <4ADC3975.8050109@klingt.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index c373683..95a55ea 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -59,6 +59,8 @@ static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES}, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS}, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, }; --- tools/perf/builtin-stat.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 95a55eaf72f5..90e0a268343d 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -50,17 +50,17 @@ static struct perf_event_attr default_attrs[] = { - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES}, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, - - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES}, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS}, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, + + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, }; -- cgit v1.2.3 From dd86e72abdbc4b436471af5a97927c6145f5298c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 19 Oct 2009 13:33:03 +0200 Subject: perf stat: Count branches first Count branches first, cache-misses second. The reason is that on x86 branches are not counted by all counters on all CPUs. 
Before: Performance counter stats for 'ls': 0.756653 task-clock-msecs # 0.802 CPUs 0 context-switches # 0.000 M/sec 0 CPU-migrations # 0.000 M/sec 250 page-faults # 0.330 M/sec 2375725 cycles # 3139.781 M/sec 1628129 instructions # 0.685 IPC 19643 cache-references # 25.960 M/sec 4608 cache-misses # 6.090 M/sec 342532 branches # 452.694 M/sec branch-misses 0.000943356 seconds time elapsed After: Performance counter stats for 'ls': 1.056734 task-clock-msecs # 0.859 CPUs 0 context-switches # 0.000 M/sec 0 CPU-migrations # 0.000 M/sec 259 page-faults # 0.245 M/sec 3345932 cycles # 3166.295 M/sec 3074090 instructions # 0.919 IPC 616928 branches # 583.806 M/sec 39279 branch-misses # 6.367 % 21312 cache-references # 20.168 M/sec 3661 cache-misses # 3.464 M/sec 0.001230551 seconds time elapsed (also prettify the printout of branch misses, in case it's getting scaled.) Cc: Tim Blechmann Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <4ADC3975.8050109@klingt.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index c373683..95a55ea 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -59,6 +59,8 @@ static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES}, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS}, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, }; --- tools/perf/builtin-stat.c | 20 ++++++++++---------- 1 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 95a55ea..90e0a26 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -50,17 +50,17 @@ static struct perf_event_attr default_attrs[] = { - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES}, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, - - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES}, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS}, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, + + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, }; --- tools/perf/builtin-stat.c | 6 +++--- 1 file changed, 3 
insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 90e0a268343d..c6df3770b87e 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -57,10 +57,10 @@ static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, }; @@ -363,7 +363,7 @@ static void abs_printout(int counter, double avg) if (total) ratio = avg * 100 / total; - fprintf(stderr, " # %10.3f %% ", ratio); + fprintf(stderr, " # %10.3f %% ", ratio); } else { total = avg_stats(&runtime_nsecs_stats); -- cgit v1.2.3 From 20639c15d2e78f180d398a6b6422880fac3258bb Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 19 Oct 2009 15:11:36 -0200 Subject: perf tools: Add missing tools/perf/util/include/string.h To cure a bunch of: In file included from util/include/linux/bitmap.h:1, from util/header.h:8, from builtin-trace.c:7: util/include/../../../../include/linux/bitmap.h:8:26: error: linux/string.h: No such file or directory make: *** [builtin-trace.o] Error 1 make: *** Waiting for unfinished jobs.... Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Peter Zijlstra LKML-Reference: <1255972296-11500-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 + tools/perf/util/include/linux/string.h | 1 + 2 files changed, 2 insertions(+) create mode 100644 tools/perf/util/include/linux/string.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index db89a6de9d06..3b154f17d95b 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -329,6 +329,7 @@ LIB_H += ../../include/linux/perf_event.h LIB_H += ../../include/linux/rbtree.h LIB_H += ../../include/linux/list.h LIB_H += util/include/linux/list.h +LIB_H += util/include/linux/string.h LIB_H += perf.h LIB_H += util/event.h LIB_H += util/types.h diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h new file mode 100644 index 000000000000..3b2f5900276f --- /dev/null +++ b/tools/perf/util/include/linux/string.h @@ -0,0 +1 @@ +#include -- cgit v1.2.3 From 79b9ad361be8c6f3eeea97dd3883e8bcfa989333 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 19 Oct 2009 15:31:31 -0200 Subject: perf tools: Add bunch of missing headers to LIB_H Build dependencies were not properly mapped out. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Frederic Weisbecker LKML-Reference: <1255973491-11626-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 3b154f17d95b..64c6b7b57d85 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -328,8 +328,25 @@ LIB_FILE=libperf.a LIB_H += ../../include/linux/perf_event.h LIB_H += ../../include/linux/rbtree.h LIB_H += ../../include/linux/list.h +LIB_H += util/include/linux/bitmap.h +LIB_H += util/include/linux/bitops.h +LIB_H += util/include/linux/compiler.h +LIB_H += util/include/linux/ctype.h +LIB_H += util/include/linux/kernel.h LIB_H += util/include/linux/list.h +LIB_H += util/include/linux/module.h +LIB_H += util/include/linux/poison.h +LIB_H += util/include/linux/prefetch.h +LIB_H += util/include/linux/rbtree.h LIB_H += util/include/linux/string.h +LIB_H += util/include/linux/types.h +LIB_H += util/include/asm/asm-offsets.h +LIB_H += util/include/asm/bitops.h +LIB_H += util/include/asm/byteorder.h +LIB_H += util/include/asm/swab.h +LIB_H += util/include/asm/system.h +LIB_H += util/include/asm/types.h +LIB_H += util/include/asm/uaccess.h LIB_H += perf.h LIB_H += util/event.h LIB_H += util/types.h -- cgit v1.2.3 From 3bc2a39c69d423d5d1f0b3ef77960b1464c976a0 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Tue, 20 Oct 2009 06:46:49 +0900 Subject: perf timechart: Fix the wakeup-arrows that point to non-visible processes The timechart wakeup arrows currently show no process information when the waker/wakee are processes that are not actually chosen to be shown on the timechart. This patch fixes this oversight, by looking through all processes (after giving preference to visible processes) as well as falling back to just showing the PID if no name for the process can be resolved. 
Signed-off-by: Arjan van de Ven Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091020064649.0e4959b2@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-timechart.c | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 702d8fe58fbc..e8a510d935e5 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -765,19 +765,40 @@ static void draw_wakeups(void) if (c->Y && c->start_time <= we->time && c->end_time >= we->time) { if (p->pid == we->waker) { from = c->Y; - task_from = c->comm; + task_from = strdup(c->comm); } if (p->pid == we->wakee) { to = c->Y; - task_to = c->comm; + task_to = strdup(c->comm); } } c = c->next; } + c = p->all; + while (c) { + if (p->pid == we->waker && !from) { + from = c->Y; + task_from = strdup(c->comm); + } + if (p->pid == we->wakee && !to) { + to = c->Y; + task_to = strdup(c->comm); + } + c = c->next; + } } p = p->next; } + if (!task_from) { + task_from = malloc(40); + sprintf(task_from, "[%i]", we->waker); + } + if (!task_to) { + task_to = malloc(40); + sprintf(task_to, "[%i]", we->wakee); + } + if (we->waker == -1) svg_interrupt(we->time, to); else if (from && to && abs(from - to) == 1) @@ -785,6 +806,9 @@ static void draw_wakeups(void) else svg_partial_wakeline(we->time, from, task_from, to, task_to); we = we->next; + + free(task_from); + free(task_to); } } -- cgit v1.2.3 From 2e600d01c131ee189f55ca1879cd364b9e056df8 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Tue, 20 Oct 2009 06:47:31 +0900 Subject: perf timechart: Improve the visual appearance of scheduler delays [from KS feedback] Currently, scheduler delays are shown in a mostly transparent, light yellow color. This color is rather hard to see on several screens, especially projectors. This patch changes the color of the scheduler delays to be a much more "hard" yellow that survived the kernel summit projector. 
Reported-by: Linus Torvalds Signed-off-by: Arjan van de Ven Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091020064731.20ae126a@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/svghelper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c index 856655d8b0b8..b3637db025a2 100644 --- a/tools/perf/util/svghelper.c +++ b/tools/perf/util/svghelper.c @@ -103,7 +103,7 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end) fprintf(svgfile, " rect.process2 { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:0; stroke:rgb( 0, 0, 0); } \n"); fprintf(svgfile, " rect.sample { fill:rgb( 0, 0,255); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n"); fprintf(svgfile, " rect.blocked { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n"); - fprintf(svgfile, " rect.waiting { fill:rgb(214,214, 0); fill-opacity:0.3; stroke-width:0; stroke:rgb( 0, 0, 0); } \n"); + fprintf(svgfile, " rect.waiting { fill:rgb(224,214, 0); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n"); fprintf(svgfile, " rect.WAITING { fill:rgb(255,214, 48); fill-opacity:0.6; stroke-width:0; stroke:rgb( 0, 0, 0); } \n"); fprintf(svgfile, " rect.cpu { fill:rgb(192,192,192); fill-opacity:0.2; stroke-width:0.5; stroke:rgb(128,128,128); } \n"); fprintf(svgfile, " rect.pstate { fill:rgb(128,128,128); fill-opacity:0.8; stroke-width:0; } \n"); -- cgit v1.2.3 From bbe2987bea26a684ff11d887dfc4cf39b22c27a2 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Tue, 20 Oct 2009 07:09:39 +0900 Subject: perf timechart: Add a process filter During the Kernel Summit demo of perf/ftrace/timechart, there was a feature request to have a process filter for timechart so that you can zoom into one or a few processes that you are really interested in. This patch adds basic support for this feature, the -p (--process) option now can select a PID or a process name to be shown. Multiple -p options are allowed, and the combined set will be included in the output. 
Signed-off-by: Arjan van de Ven Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091020070939.7d0fb8a7@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-timechart.txt | 5 +- tools/perf/builtin-timechart.c | 105 +++++++++++++++++++++++++++- 2 files changed, 106 insertions(+), 4 deletions(-) diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt index a7910099d6fd..4b1788355eca 100644 --- a/tools/perf/Documentation/perf-timechart.txt +++ b/tools/perf/Documentation/perf-timechart.txt @@ -31,9 +31,12 @@ OPTIONS -w:: --width=:: Select the width of the SVG file (default: 1000) --p:: +-P:: --power-only:: Only output the CPU power section of the diagram +-p:: +--process:: + Select the processes to display, by name or PID SEE ALSO diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index e8a510d935e5..34fad57087f9 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -153,6 +153,17 @@ static struct wake_event *wake_events; struct sample_wrapper *all_samples; + +struct process_filter; +struct process_filter { + char *name; + int pid; + struct process_filter *next; +}; + +static struct process_filter *process_filter; + + static struct per_pid *find_create_pid(int pid) { struct per_pid *cursor = all_data; @@ -763,11 +774,11 @@ static void draw_wakeups(void) c = p->all; while (c) { if (c->Y && c->start_time <= we->time && c->end_time >= we->time) { - if (p->pid == we->waker) { + if (p->pid == we->waker && !from) { from = c->Y; task_from = strdup(c->comm); } - if (p->pid == we->wakee) { + if (p->pid == we->wakee && !to) { to = c->Y; task_to = strdup(c->comm); } @@ -882,12 +893,89 @@ static void draw_process_bars(void) } } +static void add_process_filter(const char *string) +{ + struct process_filter *filt; + int pid; + + pid = strtoull(string, NULL, 10); + filt = malloc(sizeof(struct process_filter)); + if (!filt) + return; + + filt->name = strdup(string); + filt->pid = pid; + filt->next = process_filter; + + process_filter = filt; +} + +static int passes_filter(struct per_pid *p, struct per_pidcomm *c) +{ + struct process_filter *filt; + if (!process_filter) + return 1; + + filt = process_filter; + while (filt) { + if (filt->pid && p->pid == filt->pid) + return 1; + if (strcmp(filt->name, c->comm) == 0) + return 1; + filt = filt->next; + } + return 0; +} + +static int determine_display_tasks_filtered(void) +{ + struct per_pid *p; + struct per_pidcomm *c; + int count = 0; + + p = all_data; + while (p) { + p->display = 0; + if (p->start_time == 1) + p->start_time = first_time; + + /* no exit marker, task kept running to the end */ + if (p->end_time == 0) + p->end_time = last_time; + + c = p->all; + + while (c) { + c->display = 0; + + if (c->start_time == 1) + c->start_time = first_time; + + if (passes_filter(p, c)) { + c->display = 1; + p->display = 1; + count++; + } + + if (c->end_time == 0) + c->end_time = last_time; + + c = c->next; + } + p = p->next; + } + return count; +} + static int determine_display_tasks(u64 threshold) { struct per_pid *p; struct per_pidcomm *c; int count = 0; + if (process_filter) + return determine_display_tasks_filtered(); + p = all_data; while (p) { p->display = 0; @@ -1153,6 +1241,14 @@ static int __cmd_record(int argc, const char **argv) return cmd_record(i, rec_argv, NULL); } +static int +parse_process(const struct option *opt __used, const char *arg, int __used 
unset) +{ + if (arg) + add_process_filter(arg); + return 0; +} + static const struct option options[] = { OPT_STRING('i', "input", &input_name, "file", "input file name"), @@ -1160,8 +1256,11 @@ static const struct option options[] = { "output file name"), OPT_INTEGER('w', "width", &svg_page_width, "page width"), - OPT_BOOLEAN('p', "power-only", &power_only, + OPT_BOOLEAN('P', "power-only", &power_only, "output power data only"), + OPT_CALLBACK('p', "process", NULL, "process", + "process selector. Pass a pid or process name.", + parse_process), OPT_END() }; -- cgit v1.2.3 From ed52ce2e3c33dc7626a40fa2da766d1a6460e543 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 19 Oct 2009 17:17:57 -0200 Subject: perf tools: Add ->unmap_ip operation to struct map We need this because we get section relative addresses when reading the symtabs, but when a tool like 'perf annotate' needs to match these address to what 'objdump -dS' produces we need the address + section back again. So in annotate now we look at the 'struct hist_entry' instances (that weren't really being used) so that we iterate only over the symbols that had some hit and get the map where that particular hit happened so that we can get the right address to match with annotate. Verified that at least: perf annotate mmap_read_counter # Uses the ~/bin/perf binary perf annotate --vmlinux /home/acme/git/build/perf/vmlinux intel_pmu_enable_all on a 'perf record perf top' session seems to work. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1255979877-12533-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 62 +++++++++++++++++++++++++++++-------------- tools/perf/util/event.h | 8 +++++- tools/perf/util/map.c | 6 +++-- tools/perf/util/symbol.c | 8 +++--- 4 files changed, 58 insertions(+), 26 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 56ba71658d70..06f10278b28e 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -58,9 +58,12 @@ static void hist_hit(struct hist_entry *he, u64 ip) return; sym_size = sym->end - sym->start; - ip = he->map->map_ip(he->map, ip); offset = ip - sym->start; + if (verbose) + fprintf(stderr, "%s: ip=%Lx\n", __func__, + he->map->unmap_ip(he->map, ip)); + if (offset >= sym_size) return; @@ -83,8 +86,7 @@ static int hist_entry__add(struct thread *thread, struct map *map, count, level, &hit); if (he == NULL) return -ENOMEM; - if (hit) - hist_hit(he, ip); + hist_hit(he, ip); return 0; } @@ -260,14 +262,15 @@ process_event(event_t *event, unsigned long offset, unsigned long head) return 0; } -static int -parse_line(FILE *file, struct symbol *sym, u64 len) +static int parse_line(FILE *file, struct hist_entry *he, u64 len) { + struct symbol *sym = he->sym; char *line = NULL, *tmp, *tmp2; static const char *prev_line; static const char *prev_color; unsigned int offset; size_t line_len; + u64 start; s64 line_ip; int ret; char *c; @@ -304,6 +307,8 @@ parse_line(FILE *file, struct symbol *sym, u64 len) line_ip = -1; } + start = he->map->unmap_ip(he->map, sym->start); + if (line_ip != -1) { const char *path = NULL; unsigned int hits = 0; @@ -311,7 +316,7 @@ parse_line(FILE *file, struct symbol *sym, u64 len) const char *color; struct sym_ext *sym_ext = sym->priv; - offset = line_ip - sym->start; + offset = line_ip - start; if (offset < len) hits = sym->hist[offset]; @@ -390,8 +395,10 @@ static void 
free_source_line(struct symbol *sym, int len) /* Get the filename:line for the colored entries */ static void -get_source_line(struct symbol *sym, int len, const char *filename) +get_source_line(struct hist_entry *he, int len, const char *filename) { + struct symbol *sym = he->sym; + u64 start; int i; char cmd[PATH_MAX * 2]; struct sym_ext *sym_ext; @@ -404,6 +411,7 @@ get_source_line(struct symbol *sym, int len, const char *filename) return; sym_ext = sym->priv; + start = he->map->unmap_ip(he->map, sym->start); for (i = 0; i < len; i++) { char *path = NULL; @@ -415,7 +423,7 @@ get_source_line(struct symbol *sym, int len, const char *filename) if (sym_ext[i].percent <= 0.5) continue; - offset = sym->start + i; + offset = start + i; sprintf(cmd, "addr2line -e %s %016llx", filename, offset); fp = popen(cmd, "r"); if (!fp) @@ -465,8 +473,11 @@ static void print_summary(const char *filename) } } -static void annotate_sym(struct dso *dso, struct symbol *sym) +static void annotate_sym(struct hist_entry *he) { + struct map *map = he->map; + struct dso *dso = map->dso; + struct symbol *sym = he->sym; const char *filename = dso->long_name, *d_filename; u64 len; char command[PATH_MAX*2]; @@ -475,6 +486,12 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) if (!filename) return; + if (verbose) + fprintf(stderr, "%s: filename=%s, sym=%s, start=%Lx, end=%Lx\n", + __func__, filename, sym->name, + map->unmap_ip(map, sym->start), + map->unmap_ip(map, sym->end)); + if (full_paths) d_filename = filename; else @@ -483,7 +500,7 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) len = sym->end - sym->start; if (print_line) { - get_source_line(sym, len, filename); + get_source_line(he, len, filename); print_summary(filename); } @@ -496,7 +513,8 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) dso, dso->long_name, sym, sym->name); sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s", - sym->start, sym->end, filename, filename); + map->unmap_ip(map, sym->start), map->unmap_ip(map, sym->end), + filename, filename); if (verbose >= 3) printf("doing: %s\n", command); @@ -506,7 +524,7 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) return; while (!feof(file)) { - if (parse_line(file, sym, len) < 0) + if (parse_line(file, he, len) < 0) break; } @@ -518,18 +536,22 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) static void find_annotations(void) { struct rb_node *nd; - struct dso *dso; int count = 0; - list_for_each_entry(dso, &dsos, node) { + for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { + struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); - for (nd = rb_first(&dso->syms); nd; nd = rb_next(nd)) { - struct symbol *sym = rb_entry(nd, struct symbol, rb_node); + if (he->sym && he->sym->hist) { + annotate_sym(he); + count++; + /* + * Since we have a hist_entry per IP for the same + * symbol, free he->sym->hist to signal we already + * processed this symbol. 
+ */ + free(he->sym->hist); + he->sym->hist = NULL; - if (sym->hist) { - annotate_sym(dso, sym); - count++; - } } } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index c2e62be62798..6b5be56a8271 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -82,6 +82,7 @@ struct map { u64 end; u64 pgoff; u64 (*map_ip)(struct map *, u64); + u64 (*unmap_ip)(struct map *, u64); struct dso *dso; }; @@ -90,7 +91,12 @@ static inline u64 map__map_ip(struct map *map, u64 ip) return ip - map->start + map->pgoff; } -static inline u64 vdso__map_ip(struct map *map __used, u64 ip) +static inline u64 map__unmap_ip(struct map *map, u64 ip) +{ + return ip + map->start - map->pgoff; +} + +static inline u64 identity__map_ip(struct map *map __used, u64 ip) { return ip; } diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 804e02382739..4e203d144f9e 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -54,9 +54,11 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) goto out_delete; if (self->dso == vdso || anon) - self->map_ip = vdso__map_ip; - else + self->map_ip = self->unmap_ip = identity__map_ip; + else { self->map_ip = map__map_ip; + self->unmap_ip = map__unmap_ip; + } } return self; out_delete: diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index faa84f5d4f54..3350119f6909 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -337,7 +337,7 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules) return -1; } - map->map_ip = vdso__map_ip; + map->map_ip = map->unmap_ip = identity__map_ip; kernel_maps__insert(map); ++kernel_range; } @@ -790,7 +790,8 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, dso__delete(curr_dso); goto out_elf_end; } - curr_map->map_ip = vdso__map_ip; + curr_map->map_ip = identity__map_ip; + curr_map->unmap_ip = identity__map_ip; curr_dso->origin = DSO__ORIG_KERNEL; kernel_maps__insert(curr_map); dsos__add(curr_dso); @@ -1158,6 +1159,7 @@ static struct map *map__new2(u64 start, struct dso *dso) self->pgoff = 0; self->dso = dso; self->map_ip = map__map_ip; + self->unmap_ip = map__unmap_ip; RB_CLEAR_NODE(&self->rb_node); } return self; @@ -1259,7 +1261,7 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, if (kernel_map == NULL) goto out_delete_dso; - kernel_map->map_ip = vdso__map_ip; + kernel_map->map_ip = kernel_map->unmap_ip = identity__map_ip; if (use_modules && dsos__load_modules(sym_priv_size) < 0) { fprintf(stderr, "Failed to load list of modules in use! " -- cgit v1.2.3 From e42049926ebdcae24fdfdc8f0e3ff8f05f24a60b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 20 Oct 2009 14:25:40 -0200 Subject: perf annotate: Use the sym_priv_size area for the histogram We have this sym_priv_size mechanism for attaching private areas to struct symbol entries but annotate wasn't using it, adding private areas to struct symbol in addition to a ->priv pointer. Scrap all that and use the sym_priv_size mechanism. 
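As a rough sketch of the sym_priv_size idea (illustrative names, not the exact perf code): each symbol is allocated with priv_size extra bytes placed immediately before the struct, and the accessor is assumed to simply step back over that area:

/*
 * Sketch only. symbol__priv() stands in for the real dso__sym_priv()
 * accessor; the layout (private area in front of the struct) follows the
 * symbol__new() hunk below.
 */
#include <stdlib.h>
#include <string.h>

struct symbol {
	unsigned long long start, end;
	char name[0];
};

static struct symbol *symbol__new(unsigned long long start,
				  unsigned long long len,
				  const char *name, size_t priv_size)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen);

	if (!self)
		return NULL;
	self = (void *)self + priv_size;	/* private area sits in front */
	self->start = start;
	self->end = len ? start + len - 1 : start;
	memcpy(self->name, name, namelen);
	return self;
}

static void *symbol__priv(struct symbol *self, size_t priv_size)
{
	return (void *)self - priv_size;	/* step back over it */
}

int main(void)
{
	struct priv { unsigned long long hits; };	/* whatever a tool attaches */
	struct symbol *sym = symbol__new(0x1000, 0x40, "example_sym",
					 sizeof(struct priv));
	struct priv *p;

	if (!sym)
		return 1;
	p = symbol__priv(sym, sizeof(struct priv));
	p->hits++;			/* the private area was zeroed by calloc() */
	return p->hits == 1 ? 0 : 1;
}
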
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256055940-19511-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 109 ++++++++++++++++++++++++++++++------------ tools/perf/builtin-report.c | 2 +- tools/perf/util/data_map.c | 2 +- tools/perf/util/event.h | 8 +++- tools/perf/util/map.c | 21 +++++++- tools/perf/util/symbol.c | 56 ++++++++-------------- tools/perf/util/symbol.h | 17 +++---- 7 files changed, 132 insertions(+), 83 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 06f10278b28e..245692530de1 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -37,12 +37,44 @@ static int print_line; static unsigned long page_size; static unsigned long mmap_window = 32; +struct sym_hist { + u64 sum; + u64 ip[0]; +}; + struct sym_ext { struct rb_node node; double percent; char *path; }; +struct sym_priv { + struct sym_hist *hist; + struct sym_ext *ext; +}; + +static const char *sym_hist_filter; + +static int symbol_filter(struct map *map, struct symbol *sym) +{ + if (strcmp(sym->name, sym_hist_filter) == 0) { + struct sym_priv *priv = dso__sym_priv(map->dso, sym); + const int size = (sizeof(*priv->hist) + + (sym->end - sym->start) * sizeof(u64)); + + priv->hist = malloc(size); + if (priv->hist) + memset(priv->hist, 0, size); + return 0; + } + /* + * FIXME: We should really filter it out, as we don't want to go thru symbols + * we're not interested, and if a DSO ends up with no symbols, delete it too, + * but right now the kernel loading routines in symbol.c bail out if no symbols + * are found, fix it later. + */ + return 0; +} /* * collect histogram counts @@ -51,10 +83,16 @@ static void hist_hit(struct hist_entry *he, u64 ip) { unsigned int sym_size, offset; struct symbol *sym = he->sym; + struct sym_priv *priv; + struct sym_hist *h; he->count++; - if (!sym || !sym->hist) + if (!sym || !he->map) + return; + + priv = dso__sym_priv(he->map->dso, sym); + if (!priv->hist) return; sym_size = sym->end - sym->start; @@ -67,15 +105,16 @@ static void hist_hit(struct hist_entry *he, u64 ip) if (offset >= sym_size) return; - sym->hist_sum++; - sym->hist[offset]++; + h = priv->hist; + h->sum++; + h->ip[offset]++; if (verbose >= 3) printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n", (void *)(unsigned long)he->sym->start, he->sym->name, (void *)(unsigned long)ip, ip - he->sym->start, - sym->hist[offset]); + h->ip[offset]); } static int hist_entry__add(struct thread *thread, struct map *map, @@ -162,7 +201,9 @@ got_map: static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct map *map = map__new(&event->mmap, NULL, 0); + struct map *map = map__new(&event->mmap, NULL, 0, + sizeof(struct sym_priv), symbol_filter, + verbose); struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n", @@ -314,17 +355,19 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len) unsigned int hits = 0; double percent = 0.0; const char *color; - struct sym_ext *sym_ext = sym->priv; + struct sym_priv *priv = dso__sym_priv(he->map->dso, sym); + struct sym_ext *sym_ext = priv->ext; + struct sym_hist *h = priv->hist; offset = line_ip - start; if (offset < len) - hits = sym->hist[offset]; + hits = h->ip[offset]; if (offset < len && sym_ext) { path = sym_ext[offset].path; percent = sym_ext[offset].percent; - } else if 
(sym->hist_sum) - percent = 100.0 * hits / sym->hist_sum; + } else if (h->sum) + percent = 100.0 * hits / h->sum; color = get_percent_color(percent); @@ -377,9 +420,10 @@ static void insert_source_line(struct sym_ext *sym_ext) rb_insert_color(&sym_ext->node, &root_sym_ext); } -static void free_source_line(struct symbol *sym, int len) +static void free_source_line(struct hist_entry *he, int len) { - struct sym_ext *sym_ext = sym->priv; + struct sym_priv *priv = dso__sym_priv(he->map->dso, he->sym); + struct sym_ext *sym_ext = priv->ext; int i; if (!sym_ext) @@ -389,7 +433,7 @@ static void free_source_line(struct symbol *sym, int len) free(sym_ext[i].path); free(sym_ext); - sym->priv = NULL; + priv->ext = NULL; root_sym_ext = RB_ROOT; } @@ -402,15 +446,16 @@ get_source_line(struct hist_entry *he, int len, const char *filename) int i; char cmd[PATH_MAX * 2]; struct sym_ext *sym_ext; + struct sym_priv *priv = dso__sym_priv(he->map->dso, sym); + struct sym_hist *h = priv->hist; - if (!sym->hist_sum) + if (!h->sum) return; - sym->priv = calloc(len, sizeof(struct sym_ext)); - if (!sym->priv) + sym_ext = priv->ext = calloc(len, sizeof(struct sym_ext)); + if (!priv->ext) return; - sym_ext = sym->priv; start = he->map->unmap_ip(he->map, sym->start); for (i = 0; i < len; i++) { @@ -419,7 +464,7 @@ get_source_line(struct hist_entry *he, int len, const char *filename) u64 offset; FILE *fp; - sym_ext[i].percent = 100.0 * sym->hist[i] / sym->hist_sum; + sym_ext[i].percent = 100.0 * h->ip[i] / h->sum; if (sym_ext[i].percent <= 0.5) continue; @@ -530,7 +575,7 @@ static void annotate_sym(struct hist_entry *he) pclose(file); if (print_line) - free_source_line(sym, len); + free_source_line(he, len); } static void find_annotations(void) @@ -540,19 +585,23 @@ static void find_annotations(void) for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); + struct sym_priv *priv; - if (he->sym && he->sym->hist) { - annotate_sym(he); - count++; - /* - * Since we have a hist_entry per IP for the same - * symbol, free he->sym->hist to signal we already - * processed this symbol. - */ - free(he->sym->hist); - he->sym->hist = NULL; + if (he->sym == NULL) + continue; - } + priv = dso__sym_priv(he->map->dso, he->sym); + if (priv->hist == NULL) + continue; + + annotate_sym(he); + count++; + /* + * Since we have a hist_entry per IP for the same symbol, free + * he->sym->hist to signal we already processed this symbol. 
+ */ + free(priv->hist); + priv->hist = NULL; } if (!count) @@ -593,7 +642,7 @@ static int __cmd_annotate(void) exit(0); } - if (load_kernel() < 0) { + if (load_kernel(sizeof(struct sym_priv), symbol_filter) < 0) { perror("failed to load kernel symbols"); return EXIT_FAILURE; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index a4f8cc209151..bee207ce589a 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -680,7 +680,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct map *map = map__new(&event->mmap, cwd, cwdlen); + struct map *map = map__new(&event->mmap, cwd, cwdlen, 0, NULL, verbose); struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index 242b0555ab91..18accb8fee4d 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -130,7 +130,7 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, if (curr_handler->sample_type_check(sample_type) < 0) exit(-1); - if (load_kernel() < 0) { + if (load_kernel(0, NULL) < 0) { perror("failed to load kernel symbols"); return EXIT_FAILURE; } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 6b5be56a8271..db59c8bbe49a 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -101,7 +101,13 @@ static inline u64 identity__map_ip(struct map *map __used, u64 ip) return ip; } -struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen); +struct symbol; + +typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); + +struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, + unsigned int sym_priv_size, symbol_filter_t filter, + int v); struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 4e203d144f9e..55079c0200e0 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -3,6 +3,7 @@ #include #include #include +#include "debug.h" static inline int is_anon_memory(const char *filename) { @@ -19,7 +20,9 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) return n; } - struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen) +struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, + unsigned int sym_priv_size, symbol_filter_t filter, + int v) { struct map *self = malloc(sizeof(*self)); @@ -27,6 +30,7 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) const char *filename = event->filename; char newfilename[PATH_MAX]; int anon; + bool new_dso; if (cwd) { int n = strcommon(filename, cwd, cwdlen); @@ -49,10 +53,23 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) self->end = event->start + event->len; self->pgoff = event->pgoff; - self->dso = dsos__findnew(filename); + self->dso = dsos__findnew(filename, sym_priv_size, &new_dso); if (self->dso == NULL) goto out_delete; + if (new_dso) { + int nr = dso__load(self->dso, self, filter, v); + + if (nr < 0) + eprintf("Failed to open %s, continuing " + "without symbols\n", + self->dso->long_name); + else if (nr == 0) + eprintf("No symbols found in %s, maybe " + "install a debug package?\n", + self->dso->long_name); + } + if (self->dso == vdso || anon) self->map_ip = 
self->unmap_ip = identity__map_ip; else { diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 3350119f6909..0a4898480d6d 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -11,8 +11,6 @@ #include #include -const char *sym_hist_filter; - enum dso_origin { DSO__ORIG_KERNEL = 0, DSO__ORIG_JAVA_JIT, @@ -86,22 +84,16 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name, if (!self) return NULL; - if (v > 2) - printf("new symbol: %016Lx [%08lx]: %s, hist: %p\n", - start, (unsigned long)len, name, self->hist); - - self->hist = NULL; - self->hist_sum = 0; - - if (sym_hist_filter && !strcmp(name, sym_hist_filter)) - self->hist = calloc(sizeof(u64), len); - if (priv_size) { memset(self, 0, priv_size); self = ((void *)self) + priv_size; } self->start = start; self->end = len ? start + len - 1 : start; + + if (v > 2) + printf("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); + memcpy(self->name, name, namelen); return self; @@ -919,7 +911,8 @@ char dso__symtab_origin(const struct dso *self) return origin[self->origin]; } -int dso__load(struct dso *self, struct map *map, symbol_filter_t filter, int v) +int dso__load(struct dso *self, struct map *map, + symbol_filter_t filter, int v) { int size = PATH_MAX; char *name = malloc(size), *build_id = NULL; @@ -1335,33 +1328,21 @@ static struct dso *dsos__find(const char *name) return NULL; } -struct dso *dsos__findnew(const char *name) +struct dso *dsos__findnew(const char *name, unsigned int sym_priv_size, + bool *is_new) { struct dso *dso = dsos__find(name); - int nr; - - if (dso) - return dso; - - dso = dso__new(name, 0); - if (!dso) - goto out_delete_dso; - nr = dso__load(dso, NULL, NULL, verbose); - if (nr < 0) { - eprintf("Failed to open: %s\n", name); - goto out_delete_dso; - } - if (!nr) - eprintf("No symbols found in: %s, maybe install a debug package?\n", name); - - dsos__add(dso); + if (!dso) { + dso = dso__new(name, sym_priv_size); + if (dso) { + dsos__add(dso); + *is_new = true; + } + } else + *is_new = false; return dso; - -out_delete_dso: - dso__delete(dso); - return NULL; } void dsos__fprintf(FILE *fp) @@ -1372,9 +1353,10 @@ void dsos__fprintf(FILE *fp) dso__fprintf(pos, fp); } -int load_kernel(void) +int load_kernel(unsigned int sym_priv_size, symbol_filter_t filter) { - if (dsos__load_kernel(vmlinux_name, 0, NULL, verbose, modules) <= 0) + if (dsos__load_kernel(vmlinux_name, sym_priv_size, + filter, verbose, modules) <= 0) return -1; vdso = dso__new("[vdso]", 0); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 2e4522edeb07..c2a777de9b7e 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -2,6 +2,7 @@ #define __PERF_SYMBOL 1 #include +#include #include "types.h" #include #include @@ -35,9 +36,6 @@ struct symbol { struct rb_node rb_node; u64 start; u64 end; - u64 hist_sum; - u64 *hist; - void *priv; char name[0]; }; @@ -54,10 +52,6 @@ struct dso { char name[0]; }; -extern const char *sym_hist_filter; - -typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); - struct dso *dso__new(const char *name, unsigned int sym_priv_size); void dso__delete(struct dso *self); @@ -70,15 +64,16 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip); int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, symbol_filter_t filter, int verbose, int modules); -int dso__load(struct dso *self, struct map *map, symbol_filter_t filter, - int verbose); -struct dso *dsos__findnew(const char *name); +struct dso 
*dsos__findnew(const char *name, unsigned int sym_priv_size, + bool *is_new); +int dso__load(struct dso *self, struct map *map, + symbol_filter_t filter, int v); void dsos__fprintf(FILE *fp); size_t dso__fprintf(struct dso *self, FILE *fp); char dso__symtab_origin(const struct dso *self); -int load_kernel(void); +int load_kernel(unsigned int sym_priv_size, symbol_filter_t filter); void symbol__init(void); -- cgit v1.2.3 From 8f0b037398a909ccf703ad5f5803066db6327f22 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 20 Oct 2009 15:08:29 -0200 Subject: perf annotate: Remove requirement of passing a symbol name If the user doesn't pass a symbol name to annotate, it will annotate all the symbols that have hits, in order, just like 'perf report -s comm,dso,symbol'. This is a natural followup patch to the one that uses output_hists to find the symbols with hits. The common case is to annotate the first few entries at the top of a perf report, so lets type less characters. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256058509-19678-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 245692530de1..99bac6aa72c4 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -57,7 +57,8 @@ static const char *sym_hist_filter; static int symbol_filter(struct map *map, struct symbol *sym) { - if (strcmp(sym->name, sym_hist_filter) == 0) { + if (sym_hist_filter == NULL || + strcmp(sym->name, sym_hist_filter) == 0) { struct sym_priv *priv = dso__sym_priv(map->dso, sym); const int size = (sizeof(*priv->hist) + (sym->end - sym->start) * sizeof(u64)); @@ -581,7 +582,6 @@ static void annotate_sym(struct hist_entry *he) static void find_annotations(void) { struct rb_node *nd; - int count = 0; for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); @@ -595,7 +595,6 @@ static void find_annotations(void) continue; annotate_sym(he); - count++; /* * Since we have a hist_entry per IP for the same symbol, free * he->sym->hist to signal we already processed this symbol. @@ -603,9 +602,6 @@ static void find_annotations(void) free(priv->hist); priv->hist = NULL; } - - if (!count) - printf(" Error: symbol '%s' not present amongst the samples.\n", sym_hist_filter); } static int __cmd_annotate(void) @@ -793,9 +789,6 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) sym_hist_filter = argv[0]; } - if (!sym_hist_filter) - usage_with_options(annotate_usage, options); - setup_pager(); if (field_sep && *field_sep == '.') { -- cgit v1.2.3 From c88e4bf60de6253a048cf4e6b3b0715e543e0460 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 20 Oct 2009 15:54:55 -0200 Subject: perf top: Fix symbol annotation We need to use map->unmap_ip() here too to match section relative symbol address to the absolute address needed to match objdump -dS addresses. 
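For reference, the two helpers from util/event.h are exact inverses, as the small standalone check below illustrates; the map start/pgoff values and the sample address are made up for the example:

/* Standalone sketch of the map_ip/unmap_ip round trip. */
#include <assert.h>

typedef unsigned long long u64;

struct map {
	u64 start, end, pgoff;
};

static u64 map__map_ip(struct map *map, u64 ip)
{
	return ip - map->start + map->pgoff;	/* absolute -> dso relative */
}

static u64 map__unmap_ip(struct map *map, u64 ip)
{
	return ip + map->start - map->pgoff;	/* dso relative -> absolute */
}

int main(void)
{
	struct map map = { .start = 0x400000, .end = 0x500000, .pgoff = 0x1000 };
	u64 addr = 0x401234;	/* the kind of address objdump -dS prints */

	assert(map__unmap_ip(&map, map__map_ip(&map, addr)) == addr);
	return 0;
}
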
Reported-by: Mike Galbraith Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1256061295-19835-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index cc6628630303..fa20345a0ab0 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -141,7 +141,8 @@ static void parse_source(struct sym_entry *syme) sprintf(command, "objdump --start-address=0x%016Lx " "--stop-address=0x%016Lx -dS %s", - sym->start, sym->end, path); + map->unmap_ip(map, sym->start), + map->unmap_ip(map, sym->end), path); file = popen(command, "r"); if (!file) @@ -173,11 +174,11 @@ static void parse_source(struct sym_entry *syme) if (strlen(src->line)>8 && src->line[8] == ':') { src->eip = strtoull(src->line, NULL, 16); - src->eip += map->start; + src->eip = map->unmap_ip(map, src->eip); } if (strlen(src->line)>8 && src->line[16] == ':') { src->eip = strtoull(src->line, NULL, 16); - src->eip += map->start; + src->eip = map->unmap_ip(map, src->eip); } } pclose(file); -- cgit v1.2.3 From 60d526f7fa6246b8e32d5b45610d625a5608d988 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 20 Oct 2009 19:19:34 -0400 Subject: perf tools: Add 'make DEBUG=1' to remove the -O6 cflag When using gdb to debug perf, it is practically impossible to use when perf is compiled with -O6. For developers, this patch adds the DEBUG feature to the make command line so that a developer can easily remove the optimization flag. LKML-Reference: <1255590330.8392.446.camel@twins> Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091020232033.984323261@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 64c6b7b57d85..65e6e52fe378 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -201,7 +201,14 @@ EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement -CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) +ifeq ("$(origin DEBUG)", "command line") + PERF_DEBUG = $(DEBUG) +endif +ifndef PERF_DEBUG + CFLAGS_OPTIMIZE = -O6 +endif + +CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) LDFLAGS = -lpthread -lrt -lelf -lm ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) -- cgit v1.2.3 From 4e3b799d7dbb2a12ca8dca8d3594d32095772973 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 20 Oct 2009 19:19:35 -0400 Subject: perf tools: Use strsep() over strtok_r() for parsing single line The second argument in the strtok_r() function is not to be used generically and can have different implementations. Currently the function parsing of the perf trace code uses the second argument to copy data from. This can crash the tool or just have unpredictable results. The correct solution is to use strsep() which has a defined result. I also added a check to see if the result was correct, and will break out of the loop in case it fails to parse as expected. 
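A minimal standalone illustration of the strsep() behaviour being relied on here (the input string is made up): strsep() hands back the token, advances the caller's pointer past the delimiter, and sets that pointer to NULL when the delimiter is absent, which is exactly the parse-failure case the patch now warns about:

/* Sketch of parsing one "addr : fmt" line the strsep() way. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[] = "c0ffee00 : some format string";	/* made-up input */
	char *line = buf;
	char *addr_str = strsep(&line, ":");

	if (!line) {		/* no ':' found: well-defined failure case */
		fprintf(stderr, "error parsing print strings\n");
		return 1;
	}
	/* line now points right after the ':' and still starts with a space */
	printf("addr=%llx fmt='%s'\n",
	       (unsigned long long)strtoull(addr_str, NULL, 16), line + 1);
	return 0;
}
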
Reported-by: Arnaldo Carvalho de Melo Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker LKML-Reference: <20091020232034.237814877@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 4b61b497040e..eae560503086 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -286,16 +286,19 @@ void parse_ftrace_printk(char *file, unsigned int size __unused) char *line; char *next = NULL; char *addr_str; - char *fmt; int i; line = strtok_r(file, "\n", &next); while (line) { + addr_str = strsep(&line, ":"); + if (!line) { + warning("error parsing print strings"); + break; + } item = malloc_or_die(sizeof(*item)); - addr_str = strtok_r(line, ":", &fmt); item->addr = strtoull(addr_str, NULL, 16); /* fmt still has a space, skip it */ - item->printk = strdup(fmt+1); + item->printk = strdup(line+1); item->next = list; list = item; line = strtok_r(NULL, "\n", &next); -- cgit v1.2.3 From af0a6fa46388e1e0c2d1a672aad84f8f6ef0b20b Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 22 Oct 2009 23:23:22 +0200 Subject: perf tools: Fix missing top level callchain While recursively printing the branches of each callchains, we forget to display the root. It is never printed. Say we have: symbol f1 f2 | -------- f3 | f4 | ---------f5 f6 Actually we never see that, instead it displays: symbol | --------- f3 | f4 | --------- f5 f6 However f1 is always the same than "symbol" and if we are sorting by symbols first then "symbol", f1 and f2 will be well aligned like in the above example, so displaying f1 looks redundant here. But if we are sorting by something else first (dso, comm, etc...), displaying f1 doesn't look redundant but rather necessary because the symbol is not well aligned anymore with its callchain: comm dso symbol f1 f2 | --------- [...] And we want the callchain to be obvious. So we fix the bug by printing the root branch, but we also filter its first entry if we are sorting by symbols first. 
Reported-by: Anton Blanchard Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras LKML-Reference: <1256246604-17156-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-report.c | 40 +++++++++++++++++++++++++++++++++------- tools/perf/util/sort.c | 10 +++++++--- tools/perf/util/sort.h | 1 + 3 files changed, 41 insertions(+), 10 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index bee207ce589a..3d8c52220f1f 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -122,8 +122,8 @@ static void init_rem_hits(void) } static size_t -callchain__fprintf_graph(FILE *fp, struct callchain_node *self, - u64 total_samples, int depth, int depth_mask) +__callchain__fprintf_graph(FILE *fp, struct callchain_node *self, + u64 total_samples, int depth, int depth_mask) { struct rb_node *node, *next; struct callchain_node *child; @@ -174,9 +174,9 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self, new_total, cumul); } - ret += callchain__fprintf_graph(fp, child, new_total, - depth + 1, - new_depth_mask | (1 << depth)); + ret += __callchain__fprintf_graph(fp, child, new_total, + depth + 1, + new_depth_mask | (1 << depth)); node = next; } @@ -196,6 +196,33 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self, return ret; } +static size_t +callchain__fprintf_graph(FILE *fp, struct callchain_node *self, + u64 total_samples) +{ + struct callchain_list *chain; + int i = 0; + int ret = 0; + + list_for_each_entry(chain, &self->val, list) { + if (chain->ip >= PERF_CONTEXT_MAX) + continue; + + if (!i++ && sort_by_sym_first) + continue; + + if (chain->sym) + ret += fprintf(fp, " %s\n", chain->sym->name); + else + ret += fprintf(fp, " %p\n", + (void *)(long)chain->ip); + } + + ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1); + + return ret; +} + static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self, u64 total_samples) @@ -244,8 +271,7 @@ hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, break; case CHAIN_GRAPH_ABS: /* Falldown */ case CHAIN_GRAPH_REL: - ret += callchain__fprintf_graph(fp, chain, - total_samples, 1, 1); + ret += callchain__fprintf_graph(fp, chain, total_samples); case CHAIN_NONE: default: break; diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 40c9acd41cad..60ced707bd6b 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -5,8 +5,9 @@ char default_parent_pattern[] = "^sys_|^do_page_fault"; char *parent_pattern = default_parent_pattern; char default_sort_order[] = "comm,dso,symbol"; char *sort_order = default_sort_order; -int sort__need_collapse = 0; -int sort__has_parent = 0; +int sort__need_collapse = 0; +int sort__has_parent = 0; +int sort_by_sym_first; unsigned int dsos__col_width; unsigned int comms__col_width; @@ -265,6 +266,10 @@ int sort_dimension__add(const char *tok) sort__has_parent = 1; } + if (list_empty(&hist_entry__sort_list) && + !strcmp(sd->name, "symbol")) + sort_by_sym_first = true; + list_add_tail(&sd->entry->list, &hist_entry__sort_list); sd->taken = 1; @@ -273,4 +278,3 @@ int sort_dimension__add(const char *tok) return -ESRCH; } - diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 13806d782af6..24c2b709f0d3 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -39,6 +39,7 @@ extern struct sort_entry sort_parent; extern unsigned int dsos__col_width; extern unsigned int comms__col_width; 
extern unsigned int threads__col_width; +extern int sort_by_sym_first; struct hist_entry { struct rb_node rb_node; -- cgit v1.2.3 From a4fb581b15949cfd10b64c8af37bc106e95307f3 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 22 Oct 2009 23:23:23 +0200 Subject: perf tools: Bind callchains to the first sort dimension column Currently, the callchains are displayed using a constant left margin. So depending on the current sort dimension configuration, callchains may appear to be well attached to the first sort dimension column field which is mostly the case, except when the first dimension of sorting is done by comm, because these are right aligned. This patch binds the callchain to the first letter in the first column, whatever type of column it is (dso, comm, symbol). Before: 0.80% perf [k] __lock_acquire __lock_acquire lock_acquire | |--58.33%-- _spin_lock | | | |--28.57%-- inotify_should_send_event | | fsnotify | | __fsnotify_parent After: 0.80% perf [k] __lock_acquire __lock_acquire lock_acquire | |--58.33%-- _spin_lock | | | |--28.57%-- inotify_should_send_event | | fsnotify | | __fsnotify_parent Also, for clarity, we don't put anymore the callchain as is but: - If we have a top level ancestor in the callchain, start it with a first ascii hook. Before: 0.80% perf [kernel] [k] __lock_acquire __lock_acquire lock_acquire | |--58.33%-- _spin_lock | | | |--28.57%-- inotify_should_send_event | | fsnotify [..] [..] After: 0.80% perf [kernel] [k] __lock_acquire | --- __lock_acquire lock_acquire | |--58.33%-- _spin_lock | | | |--28.57%-- inotify_should_send_event | | fsnotify [..] [..] - Otherwise, if we have several top level ancestors, then display these like we did before: 1.69% Xorg | |--21.21%-- vread_hpet | 0x7fffd85b46fc | 0x7fffd85b494d | 0x7f4fafb4e54d | |--15.15%-- exaOffscreenAlloc | |--9.09%-- I830WaitLpRing Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Anton Blanchard LKML-Reference: <1256246604-17156-2-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-report.c | 82 ++++++++++++++++++++++++++++++++++----------- tools/perf/util/sort.c | 18 +++++++--- tools/perf/util/sort.h | 10 +++++- tools/perf/util/thread.c | 11 ++++++ tools/perf/util/thread.h | 2 ++ 5 files changed, 99 insertions(+), 24 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 3d8c52220f1f..72d58421223d 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -59,12 +59,28 @@ static struct perf_header *header; static u64 sample_type; -static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask) + +static size_t +callchain__fprintf_left_margin(FILE *fp, int left_margin) +{ + int i; + int ret; + + ret = fprintf(fp, " "); + + for (i = 0; i < left_margin; i++) + ret += fprintf(fp, " "); + + return ret; +} + +static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, + int left_margin) { int i; size_t ret = 0; - ret += fprintf(fp, "%s", " "); + ret += callchain__fprintf_left_margin(fp, left_margin); for (i = 0; i < depth; i++) if (depth_mask & (1 << i)) @@ -79,12 +95,12 @@ static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask) static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth, int depth_mask, int count, u64 total_samples, - int hits) + int hits, int left_margin) { int i; size_t ret = 0; - ret += fprintf(fp, "%s", " "); + ret += 
callchain__fprintf_left_margin(fp, left_margin); for (i = 0; i < depth; i++) { if (depth_mask & (1 << i)) ret += fprintf(fp, "|"); @@ -123,7 +139,8 @@ static void init_rem_hits(void) static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, - u64 total_samples, int depth, int depth_mask) + u64 total_samples, int depth, int depth_mask, + int left_margin) { struct rb_node *node, *next; struct callchain_node *child; @@ -164,7 +181,8 @@ __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, * But we keep the older depth mask for the line seperator * to keep the level link until we reach the last child */ - ret += ipchain__fprintf_graph_line(fp, depth, depth_mask); + ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, + left_margin); i = 0; list_for_each_entry(chain, &child->val, list) { if (chain->ip >= PERF_CONTEXT_MAX) @@ -172,11 +190,13 @@ __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, ret += ipchain__fprintf_graph(fp, chain, depth, new_depth_mask, i++, new_total, - cumul); + cumul, + left_margin); } ret += __callchain__fprintf_graph(fp, child, new_total, depth + 1, - new_depth_mask | (1 << depth)); + new_depth_mask | (1 << depth), + left_margin); node = next; } @@ -190,17 +210,19 @@ __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, ret += ipchain__fprintf_graph(fp, &rem_hits, depth, new_depth_mask, 0, new_total, - remaining); + remaining, left_margin); } return ret; } + static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self, - u64 total_samples) + u64 total_samples, int left_margin) { struct callchain_list *chain; + bool printed = false; int i = 0; int ret = 0; @@ -208,17 +230,27 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self, if (chain->ip >= PERF_CONTEXT_MAX) continue; - if (!i++ && sort_by_sym_first) + if (!i++ && sort__first_dimension == SORT_SYM) continue; + if (!printed) { + ret += callchain__fprintf_left_margin(fp, left_margin); + ret += fprintf(fp, "|\n"); + ret += callchain__fprintf_left_margin(fp, left_margin); + ret += fprintf(fp, "---"); + + left_margin += 3; + printed = true; + } else + ret += callchain__fprintf_left_margin(fp, left_margin); + if (chain->sym) - ret += fprintf(fp, " %s\n", chain->sym->name); + ret += fprintf(fp, " %s\n", chain->sym->name); else - ret += fprintf(fp, " %p\n", - (void *)(long)chain->ip); + ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); } - ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1); + ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin); return ret; } @@ -251,7 +283,7 @@ callchain__fprintf_flat(FILE *fp, struct callchain_node *self, static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, - u64 total_samples) + u64 total_samples, int left_margin) { struct rb_node *rb_node; struct callchain_node *chain; @@ -271,7 +303,8 @@ hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, break; case CHAIN_GRAPH_ABS: /* Falldown */ case CHAIN_GRAPH_REL: - ret += callchain__fprintf_graph(fp, chain, total_samples); + ret += callchain__fprintf_graph(fp, chain, total_samples, + left_margin); case CHAIN_NONE: default: break; @@ -316,8 +349,19 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples) ret += fprintf(fp, "\n"); - if (callchain) - hist_entry_callchain__fprintf(fp, self, total_samples); + if (callchain) { + int left_margin = 0; + + if (sort__first_dimension == SORT_COMM) { + se = list_first_entry(&hist_entry__sort_list, typeof(*se), + 
list); + left_margin = se->width ? *se->width : 0; + left_margin -= thread__comm_len(self->thread); + } + + hist_entry_callchain__fprintf(fp, self, total_samples, + left_margin); + } return ret; } diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 60ced707bd6b..b490354d1b23 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -7,7 +7,8 @@ char default_sort_order[] = "comm,dso,symbol"; char *sort_order = default_sort_order; int sort__need_collapse = 0; int sort__has_parent = 0; -int sort_by_sym_first; + +enum sort_type sort__first_dimension; unsigned int dsos__col_width; unsigned int comms__col_width; @@ -266,9 +267,18 @@ int sort_dimension__add(const char *tok) sort__has_parent = 1; } - if (list_empty(&hist_entry__sort_list) && - !strcmp(sd->name, "symbol")) - sort_by_sym_first = true; + if (list_empty(&hist_entry__sort_list)) { + if (!strcmp(sd->name, "pid")) + sort__first_dimension = SORT_PID; + else if (!strcmp(sd->name, "comm")) + sort__first_dimension = SORT_COMM; + else if (!strcmp(sd->name, "dso")) + sort__first_dimension = SORT_DSO; + else if (!strcmp(sd->name, "symbol")) + sort__first_dimension = SORT_SYM; + else if (!strcmp(sd->name, "parent")) + sort__first_dimension = SORT_PARENT; + } list_add_tail(&sd->entry->list, &hist_entry__sort_list); sd->taken = 1; diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 24c2b709f0d3..333e664ff45f 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -39,7 +39,7 @@ extern struct sort_entry sort_parent; extern unsigned int dsos__col_width; extern unsigned int comms__col_width; extern unsigned int threads__col_width; -extern int sort_by_sym_first; +extern enum sort_type sort__first_dimension; struct hist_entry { struct rb_node rb_node; @@ -54,6 +54,14 @@ struct hist_entry { struct rb_root sorted_chain; }; +enum sort_type { + SORT_PID, + SORT_COMM, + SORT_DSO, + SORT_SYM, + SORT_PARENT +}; + /* * configurable sorting bits */ diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index f53fad7c0a8d..8cb47f1d8a76 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -33,6 +33,17 @@ int thread__set_comm(struct thread *self, const char *comm) return self->comm ? 0 : -ENOMEM; } +int thread__comm_len(struct thread *self) +{ + if (!self->comm_len) { + if (!self->comm) + return 0; + self->comm_len = strlen(self->comm); + } + + return self->comm_len; +} + static size_t thread__fprintf(struct thread *self, FILE *fp) { struct rb_node *nd; diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 1abef3b7455d..53addd77ce8f 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -12,9 +12,11 @@ struct thread { pid_t pid; char shortname[3]; char *comm; + int comm_len; }; int thread__set_comm(struct thread *self, const char *comm); +int thread__comm_len(struct thread *self); struct thread *threads__findnew(pid_t pid); struct thread *register_idle_thread(void); void thread__insert_map(struct thread *self, struct map *map); -- cgit v1.2.3 From 802da5f2289bbe363acef084805195c11f453c48 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 22 Oct 2009 23:23:24 +0200 Subject: perf tools: Drop asm/types.h wrapper Wrapping the kernel headers is dangerous when it comes to arch headers. Once we wrap asm/types.h, it will also replace the glibc asm/types.h, not only the kernel one. This results in build errors on some machines. 
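As a rough illustration of how the shadowing bites (a minimal sketch; the file name and the compile command are assumptions for illustration, not taken from the perf Makefile):

	/* shadow-demo.c
	 *
	 * <asm/types.h> is meant to be the system header here, but once a
	 * wrapper directory that also contains an asm/types.h is put on the
	 * include path with -I, this include resolves to perf's wrapper
	 * instead, because -I directories are searched before the standard
	 * system include directories.
	 */
	#include <asm/types.h>

	int main(void)
	{
		__u64 val = 0;	/* which definition of __u64 did we get? */
		return (int)val;
	}

	/* Hypothetical reproduction:
	 *   gcc -Itools/perf/util/include -c shadow-demo.c
	 * If the wrapper's typedefs disagree with what other system headers
	 * in the same translation unit expect, the build breaks, which is
	 * the failure seen on some machines.
	 */
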
Drop this wrapper and do its work from linux/types.h wrapper, also the glibc asm/types.h can already handle most of the type definition it was doing (typedef __u64, __u32, etc...). Todo: Check the others asm/*.h wrappers to prevent from other conflicts. Reported-by: Ingo Molnar Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Anton Blanchard LKML-Reference: <1256246604-17156-3-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 - tools/perf/util/include/asm/bitops.h | 12 ++++++++++++ tools/perf/util/include/asm/byteorder.h | 2 +- tools/perf/util/include/asm/types.h | 17 ----------------- tools/perf/util/include/linux/types.h | 2 ++ 5 files changed, 15 insertions(+), 19 deletions(-) delete mode 100644 tools/perf/util/include/asm/types.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 65e6e52fe378..0a40c29b2387 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -352,7 +352,6 @@ LIB_H += util/include/asm/bitops.h LIB_H += util/include/asm/byteorder.h LIB_H += util/include/asm/swab.h LIB_H += util/include/asm/system.h -LIB_H += util/include/asm/types.h LIB_H += util/include/asm/uaccess.h LIB_H += perf.h LIB_H += util/event.h diff --git a/tools/perf/util/include/asm/bitops.h b/tools/perf/util/include/asm/bitops.h index fbe4d9212919..58e9817ffae0 100644 --- a/tools/perf/util/include/asm/bitops.h +++ b/tools/perf/util/include/asm/bitops.h @@ -1,6 +1,18 @@ +#ifndef _PERF_ASM_BITOPS_H_ +#define _PERF_ASM_BITOPS_H_ + +#include +#include "../../types.h" +#include + +/* CHECKME: Not sure both always match */ +#define BITS_PER_LONG __WORDSIZE + #include "../../../../include/asm-generic/bitops/__fls.h" #include "../../../../include/asm-generic/bitops/fls.h" #include "../../../../include/asm-generic/bitops/fls64.h" #include "../../../../include/asm-generic/bitops/__ffs.h" #include "../../../../include/asm-generic/bitops/ffz.h" #include "../../../../include/asm-generic/bitops/hweight.h" + +#endif diff --git a/tools/perf/util/include/asm/byteorder.h b/tools/perf/util/include/asm/byteorder.h index 39f367cfaf56..b722abe3a626 100644 --- a/tools/perf/util/include/asm/byteorder.h +++ b/tools/perf/util/include/asm/byteorder.h @@ -1,2 +1,2 @@ -#include "../asm/types.h" +#include #include "../../../../include/linux/swab.h" diff --git a/tools/perf/util/include/asm/types.h b/tools/perf/util/include/asm/types.h deleted file mode 100644 index 06703c6cd50e..000000000000 --- a/tools/perf/util/include/asm/types.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef PERF_ASM_TYPES_H_ -#define PERF_ASM_TYPES_H_ - -#include -#include "../../types.h" -#include - -/* CHECKME: Not sure both always match */ -#define BITS_PER_LONG __WORDSIZE - -typedef u64 __u64; -typedef u32 __u32; -typedef u16 __u16; -typedef u8 __u8; -typedef s64 __s64; - -#endif /* PERF_ASM_TYPES_H_ */ diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h index 858a38d08435..196862a81a21 100644 --- a/tools/perf/util/include/linux/types.h +++ b/tools/perf/util/include/linux/types.h @@ -1,6 +1,8 @@ #ifndef _PERF_LINUX_TYPES_H_ #define _PERF_LINUX_TYPES_H_ +#include + #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] -- cgit v1.2.3 From 6beba7adbe092e63dfe8d09fbd1e3ec140474a13 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 21 Oct 2009 17:34:06 -0200 Subject: perf tools: Unify debug messages mechanisms We were using eprintf in some places, that looks at a 
global 'verbose' level, and at other places passing a 'v' parameter to specify the verbosity level, unify it by introducing pr_{err,warning,debug,etc}, just like in the kernel. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256153646-10097-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 3 +- tools/perf/builtin-record.c | 2 +- tools/perf/builtin-report.c | 9 ++- tools/perf/builtin-sched.c | 4 +- tools/perf/builtin-timechart.c | 13 ++-- tools/perf/builtin-top.c | 2 +- tools/perf/builtin-trace.c | 4 +- tools/perf/util/callchain.c | 2 +- tools/perf/util/debug.c | 4 +- tools/perf/util/debug.h | 3 +- tools/perf/util/event.h | 3 +- tools/perf/util/header.c | 2 +- tools/perf/util/include/linux/kernel.h | 17 +++++ tools/perf/util/map.c | 17 ++--- tools/perf/util/symbol.c | 134 +++++++++++++++------------------ tools/perf/util/symbol.h | 5 +- tools/perf/util/thread.c | 6 +- 17 files changed, 114 insertions(+), 116 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 99bac6aa72c4..6d63c2eea2c7 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -203,8 +203,7 @@ static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { struct map *map = map__new(&event->mmap, NULL, 0, - sizeof(struct sym_priv), symbol_filter, - verbose); + sizeof(struct sym_priv), symbol_filter); struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n", diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index f0467ff0d8ad..ac5ddfff4456 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -630,7 +630,7 @@ static int __cmd_record(int argc, const char **argv) param.sched_priority = realtime_prio; if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { - printf("Could not set realtime priority.\n"); + pr_err("Could not set realtime priority.\n"); exit(-1); } } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 72d58421223d..b3d814b54555 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -689,7 +689,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) dump_printf("... 
chain: nr:%Lu\n", chain->nr); if (validate_chain(chain, event) < 0) { - eprintf("call-chain problem with event, skipping it.\n"); + pr_debug("call-chain problem with event, " + "skipping it.\n"); return 0; } @@ -700,7 +701,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) } if (thread == NULL) { - eprintf("problem processing %d event, skipping it.\n", + pr_debug("problem processing %d event, skipping it.\n", event->header.type); return -1; } @@ -738,7 +739,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) if (hist_entry__add(thread, map, sym, ip, chain, level, period)) { - eprintf("problem incrementing symbol count, skipping event\n"); + pr_debug("problem incrementing symbol count, skipping event\n"); return -1; } @@ -750,7 +751,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct map *map = map__new(&event->mmap, cwd, cwdlen, 0, NULL, verbose); + struct map *map = map__new(&event->mmap, cwd, cwdlen, 0, NULL); struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 807ca66e7a8c..9a48d9626be4 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1666,8 +1666,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) (long long)period); if (thread == NULL) { - eprintf("problem processing %d event, skipping it.\n", - event->header.type); + pr_debug("problem processing %d event, skipping it.\n", + event->header.type); return -1; } diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 34fad57087f9..0a2f22261c3a 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -1162,12 +1162,10 @@ more: size = event->header.size; if (!size || process_event(event) < 0) { - - printf("%p [%p]: skipping unknown header type: %d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type); - + pr_warning("%p [%p]: skipping unknown header type: %d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.type); /* * assume we lost track of the stream, check alignment, and * increment a single u64 in the hope to catch on again 'soon'. 
@@ -1200,7 +1198,8 @@ done: write_svg_file(output_name); - printf("Written %2.1f seconds of trace to %s.\n", (last_time - first_time) / 1000000000.0, output_name); + pr_info("Written %2.1f seconds of trace to %s.\n", + (last_time - first_time) / 1000000000.0, output_name); return rc; } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index fa20345a0ab0..4a9fe228be2a 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -809,7 +809,7 @@ static int symbol_filter(struct map *map, struct symbol *sym) static int parse_symbols(void) { if (dsos__load_kernel(vmlinux_name, sizeof(struct sym_entry), - symbol_filter, verbose, 1) <= 0) + symbol_filter, 1) <= 0) return -1; if (dump_symtab) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 4c129ff0bb16..e566bbe3f22d 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -81,8 +81,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) (long long)period); if (thread == NULL) { - eprintf("problem processing %d event, skipping it.\n", - event->header.type); + pr_debug("problem processing %d event, skipping it.\n", + event->header.type); return -1; } diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 3b8380f1b478..b3b71258272a 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -206,7 +206,7 @@ fill_node(struct callchain_node *node, struct ip_callchain *chain, } node->val_nr = chain->nr - start; if (!node->val_nr) - printf("Warning: empty node in callchain tree\n"); + pr_warning("Warning: empty node in callchain tree\n"); } static void diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index e8ca98fe0bd4..28d520d5a1fb 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -13,12 +13,12 @@ int verbose = 0; int dump_trace = 0; -int eprintf(const char *fmt, ...) +int eprintf(int level, const char *fmt, ...) { va_list args; int ret = 0; - if (verbose) { + if (verbose >= level) { va_start(args, fmt); ret = vfprintf(stderr, fmt, args); va_end(args); diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 02d1fa1c2465..e8b18a1f87a4 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -5,7 +5,8 @@ extern int verbose; extern int dump_trace; -int eprintf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); +int eprintf(int level, + const char *fmt, ...) __attribute__((format(printf, 2, 3))); int dump_printf(const char *fmt, ...) 
__attribute__((format(printf, 1, 2))); void trace_event(event_t *event); diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index db59c8bbe49a..d972b4b0d38c 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -106,8 +106,7 @@ struct symbol; typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, - unsigned int sym_priv_size, symbol_filter_t filter, - int v); + unsigned int sym_priv_size, symbol_filter_t filter); struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 622c60e45254..7d26659b806c 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -93,7 +93,7 @@ static struct perf_trace_event_type *events; void perf_header__push_event(u64 id, const char *name) { if (strlen(name) > MAX_EVENT_NAME) - printf("Event %s will be truncated\n", name); + pr_warning("Event %s will be truncated\n", name); if (!events) { events = malloc(sizeof(struct perf_trace_event_type)); diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h index 4b9204d9b266..21c0274c02fa 100644 --- a/tools/perf/util/include/linux/kernel.h +++ b/tools/perf/util/include/linux/kernel.h @@ -85,4 +85,21 @@ simple_strtoul(const char *nptr, char **endptr, int base) return strtoul(nptr, endptr, base); } +#ifndef pr_fmt +#define pr_fmt(fmt) fmt +#endif + +#define pr_err(fmt, ...) \ + do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0) +#define pr_warning(fmt, ...) \ + do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0) +#define pr_info(fmt, ...) \ + do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0) +#define pr_debug(fmt, ...) \ + eprintf(1, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_debugN(n, fmt, ...) \ + eprintf(n, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_debug3(fmt, ...) 
pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__) + #endif diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 55079c0200e0..c1c556825343 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -21,8 +21,7 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) } struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, - unsigned int sym_priv_size, symbol_filter_t filter, - int v) + unsigned int sym_priv_size, symbol_filter_t filter) { struct map *self = malloc(sizeof(*self)); @@ -58,16 +57,16 @@ struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, goto out_delete; if (new_dso) { - int nr = dso__load(self->dso, self, filter, v); + int nr = dso__load(self->dso, self, filter); if (nr < 0) - eprintf("Failed to open %s, continuing " - "without symbols\n", - self->dso->long_name); + pr_warning("Failed to open %s, continuing " + "without symbols\n", + self->dso->long_name); else if (nr == 0) - eprintf("No symbols found in %s, maybe " - "install a debug package?\n", - self->dso->long_name); + pr_warning("No symbols found in %s, maybe " + "install a debug package?\n", + self->dso->long_name); } if (self->dso == vdso || anon) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 0a4898480d6d..8f0208ce237a 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -76,7 +76,7 @@ static void kernel_maps__fixup_end(void) } static struct symbol *symbol__new(u64 start, u64 len, const char *name, - unsigned int priv_size, int v) + unsigned int priv_size) { size_t namelen = strlen(name) + 1; struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen); @@ -91,8 +91,7 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name, self->start = start; self->end = len ? start + len - 1 : start; - if (v > 2) - printf("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); + pr_debug3("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); memcpy(self->name, name, namelen); @@ -209,7 +208,7 @@ size_t dso__fprintf(struct dso *self, FILE *fp) * so that we can in the next step set the symbol ->end address and then * call kernel_maps__split_kallsyms. */ -static int kernel_maps__load_all_kallsyms(int v) +static int kernel_maps__load_all_kallsyms(void) { char *line = NULL; size_t n; @@ -252,7 +251,7 @@ static int kernel_maps__load_all_kallsyms(int v) * Will fix up the end later, when we have all symbols sorted. 
*/ sym = symbol__new(start, 0, symbol_name, - kernel_map->dso->sym_priv_size, v); + kernel_map->dso->sym_priv_size); if (sym == NULL) goto out_delete_line; @@ -300,8 +299,8 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules) if (strcmp(map->dso->name, module)) { map = kernel_maps__find_by_dso_name(module); if (!map) { - fputs("/proc/{kallsyms,modules} " - "inconsistency!\n", stderr); + pr_err("/proc/{kallsyms,modules} " + "inconsistency!\n"); return -1; } } @@ -351,10 +350,9 @@ delete_symbol: } -static int kernel_maps__load_kallsyms(symbol_filter_t filter, - int use_modules, int v) +static int kernel_maps__load_kallsyms(symbol_filter_t filter, int use_modules) { - if (kernel_maps__load_all_kallsyms(v)) + if (kernel_maps__load_all_kallsyms()) return -1; dso__fixup_sym_end(kernel_map->dso); @@ -362,9 +360,9 @@ static int kernel_maps__load_kallsyms(symbol_filter_t filter, return kernel_maps__split_kallsyms(filter, use_modules); } -static size_t kernel_maps__fprintf(FILE *fp, int v) +static size_t kernel_maps__fprintf(FILE *fp) { - size_t printed = fprintf(stderr, "Kernel maps:\n"); + size_t printed = fprintf(fp, "Kernel maps:\n"); struct rb_node *nd; for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) { @@ -372,17 +370,17 @@ static size_t kernel_maps__fprintf(FILE *fp, int v) printed += fprintf(fp, "Map:"); printed += map__fprintf(pos, fp); - if (v > 1) { + if (verbose > 1) { printed += dso__fprintf(pos->dso, fp); printed += fprintf(fp, "--\n"); } } - return printed + fprintf(stderr, "END kernel maps\n"); + return printed + fprintf(fp, "END kernel maps\n"); } static int dso__load_perf_map(struct dso *self, struct map *map, - symbol_filter_t filter, int v) + symbol_filter_t filter) { char *line = NULL; size_t n; @@ -420,7 +418,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map, continue; sym = symbol__new(start, size, line + len, - self->sym_priv_size, v); + self->sym_priv_size); if (sym == NULL) goto out_delete_line; @@ -534,7 +532,7 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, * And always look at the original dso, not at debuginfo packages, that * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). 
*/ -static int dso__synthesize_plt_symbols(struct dso *self, int v) +static int dso__synthesize_plt_symbols(struct dso *self) { uint32_t nr_rel_entries, idx; GElf_Sym sym; @@ -618,7 +616,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size, v); + sympltname, self->sym_priv_size); if (!f) goto out_elf_end; @@ -636,7 +634,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size, v); + sympltname, self->sym_priv_size); if (!f) goto out_elf_end; @@ -654,14 +652,14 @@ out_close: if (err == 0) return nr; out: - fprintf(stderr, "%s: problems reading %s PLT info.\n", - __func__, self->long_name); + pr_warning("%s: problems reading %s PLT info.\n", + __func__, self->long_name); return 0; } static int dso__load_sym(struct dso *self, struct map *map, const char *name, int fd, symbol_filter_t filter, int kernel, - int kmodule, int v) + int kmodule) { struct map *curr_map = map; struct dso *curr_dso = self; @@ -680,15 +678,12 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); if (elf == NULL) { - if (v) - fprintf(stderr, "%s: cannot read %s ELF file.\n", - __func__, name); + pr_err("%s: cannot read %s ELF file.\n", __func__, name); goto out_close; } if (gelf_getehdr(elf, &ehdr) == NULL) { - if (v) - fprintf(stderr, "%s: cannot get elf header.\n", __func__); + pr_err("%s: cannot get elf header.\n", __func__); goto out_elf_end; } @@ -794,10 +789,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, } if (curr_dso->adjust_symbols) { - if (v > 2) - printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n", - (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); - + pr_debug2("adjusting symbol: st_value: %Lx sh_addr: " + "%Lx sh_offset: %Lx\n", (u64)sym.st_value, + (u64)shdr.sh_addr, (u64)shdr.sh_offset); sym.st_value -= shdr.sh_addr - shdr.sh_offset; } /* @@ -810,7 +804,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, elf_name = demangled; new_symbol: f = symbol__new(sym.st_value, sym.st_size, elf_name, - curr_dso->sym_priv_size, v); + curr_dso->sym_priv_size); free(demangled); if (!f) goto out_elf_end; @@ -837,7 +831,7 @@ out_close: #define BUILD_ID_SIZE 128 -static char *dso__read_build_id(struct dso *self, int v) +static char *dso__read_build_id(struct dso *self) { int i; GElf_Ehdr ehdr; @@ -854,15 +848,13 @@ static char *dso__read_build_id(struct dso *self, int v) elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); if (elf == NULL) { - if (v) - fprintf(stderr, "%s: cannot read %s ELF file.\n", - __func__, self->long_name); + pr_err("%s: cannot read %s ELF file.\n", __func__, + self->long_name); goto out_close; } if (gelf_getehdr(elf, &ehdr) == NULL) { - if (v) - fprintf(stderr, "%s: cannot get elf header.\n", __func__); + pr_err("%s: cannot get elf header.\n", __func__); goto out_elf_end; } @@ -884,8 +876,7 @@ static char *dso__read_build_id(struct dso *self, int v) ++raw; bid += 2; } - if (v >= 2) - printf("%s(%s): %s\n", __func__, self->long_name, build_id); + pr_debug2("%s(%s): %s\n", __func__, self->long_name, build_id); out_elf_end: elf_end(elf); out_close: @@ -911,8 +902,7 @@ char dso__symtab_origin(const struct dso *self) return origin[self->origin]; } -int dso__load(struct dso 
*self, struct map *map, - symbol_filter_t filter, int v) +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) { int size = PATH_MAX; char *name = malloc(size), *build_id = NULL; @@ -925,7 +915,7 @@ int dso__load(struct dso *self, struct map *map, self->adjust_symbols = 0; if (strncmp(self->name, "/tmp/perf-", 10) == 0) { - ret = dso__load_perf_map(self, map, filter, v); + ret = dso__load_perf_map(self, map, filter); self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT : DSO__ORIG_NOT_FOUND; return ret; @@ -946,7 +936,7 @@ more: self->long_name); break; case DSO__ORIG_BUILDID: - build_id = dso__read_build_id(self, v); + build_id = dso__read_build_id(self); if (build_id != NULL) { snprintf(name, size, "/usr/lib/debug/.build-id/%.2s/%s.debug", @@ -967,7 +957,7 @@ more: fd = open(name, O_RDONLY); } while (fd < 0); - ret = dso__load_sym(self, map, name, fd, filter, 0, 0, v); + ret = dso__load_sym(self, map, name, fd, filter, 0, 0); close(fd); /* @@ -977,7 +967,7 @@ more: goto more; if (ret > 0) { - int nr_plt = dso__synthesize_plt_symbols(self, v); + int nr_plt = dso__synthesize_plt_symbols(self); if (nr_plt > 0) ret += nr_plt; } @@ -1025,34 +1015,29 @@ struct map *kernel_maps__find_by_dso_name(const char *name) } static int dso__load_module_sym(struct dso *self, struct map *map, - symbol_filter_t filter, int v) + symbol_filter_t filter) { int err = 0, fd = open(self->long_name, O_RDONLY); if (fd < 0) { - if (v) - fprintf(stderr, "%s: cannot open %s\n", - __func__, self->long_name); + pr_err("%s: cannot open %s\n", __func__, self->long_name); return err; } - err = dso__load_sym(self, map, self->long_name, fd, filter, 0, 1, v); + err = dso__load_sym(self, map, self->long_name, fd, filter, 0, 1); close(fd); return err; } -static int dsos__load_modules_sym_dir(char *dirname, - symbol_filter_t filter, int v) +static int dsos__load_modules_sym_dir(char *dirname, symbol_filter_t filter) { struct dirent *dent; int nr_symbols = 0, err; DIR *dir = opendir(dirname); if (!dir) { - if (v) - fprintf(stderr, "%s: cannot open %s dir\n", __func__, - dirname); + pr_err("%s: cannot open %s dir\n", __func__, dirname); return -1; } @@ -1066,7 +1051,7 @@ static int dsos__load_modules_sym_dir(char *dirname, snprintf(path, sizeof(path), "%s/%s", dirname, dent->d_name); - err = dsos__load_modules_sym_dir(path, filter, v); + err = dsos__load_modules_sym_dir(path, filter); if (err < 0) goto failure; } else { @@ -1092,7 +1077,7 @@ static int dsos__load_modules_sym_dir(char *dirname, if (map->dso->long_name == NULL) goto failure; - err = dso__load_module_sym(map->dso, map, filter, v); + err = dso__load_module_sym(map->dso, map, filter); if (err < 0) goto failure; last = rb_last(&map->dso->syms); @@ -1119,7 +1104,7 @@ failure: return -1; } -static int dsos__load_modules_sym(symbol_filter_t filter, int v) +static int dsos__load_modules_sym(symbol_filter_t filter) { struct utsname uts; char modules_path[PATH_MAX]; @@ -1130,7 +1115,7 @@ static int dsos__load_modules_sym(symbol_filter_t filter, int v) snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel", uts.release); - return dsos__load_modules_sym_dir(modules_path, filter, v); + return dsos__load_modules_sym_dir(modules_path, filter); } /* @@ -1225,15 +1210,14 @@ out_failure: } static int dso__load_vmlinux(struct dso *self, struct map *map, - const char *vmlinux, - symbol_filter_t filter, int v) + const char *vmlinux, symbol_filter_t filter) { int err, fd = open(vmlinux, O_RDONLY); if (fd < 0) return -1; - err = dso__load_sym(self, map, 
self->long_name, fd, filter, 1, 0, v); + err = dso__load_sym(self, map, self->long_name, fd, filter, 1, 0); close(fd); @@ -1241,7 +1225,7 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, } int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, - symbol_filter_t filter, int v, int use_modules) + symbol_filter_t filter, int use_modules) { int err = -1; struct dso *dso = dso__new(vmlinux, sym_priv_size); @@ -1257,26 +1241,26 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, kernel_map->map_ip = kernel_map->unmap_ip = identity__map_ip; if (use_modules && dsos__load_modules(sym_priv_size) < 0) { - fprintf(stderr, "Failed to load list of modules in use! " - "Continuing...\n"); + pr_warning("Failed to load list of modules in use! " + "Continuing...\n"); use_modules = 0; } if (vmlinux) { - err = dso__load_vmlinux(dso, kernel_map, vmlinux, filter, v); + err = dso__load_vmlinux(dso, kernel_map, vmlinux, filter); if (err > 0 && use_modules) { - int syms = dsos__load_modules_sym(filter, v); + int syms = dsos__load_modules_sym(filter); if (syms < 0) - fprintf(stderr, "Failed to read module symbols!" - " Continuing...\n"); + pr_warning("Failed to read module symbols!" + " Continuing...\n"); else err += syms; } } if (err <= 0) - err = kernel_maps__load_kallsyms(filter, use_modules, v); + err = kernel_maps__load_kallsyms(filter, use_modules); if (err > 0) { struct rb_node *node = rb_first(&dso->syms); @@ -1296,8 +1280,8 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, kernel_maps__fixup_end(); dsos__add(dso); - if (v > 0) - kernel_maps__fprintf(stderr, v); + if (verbose) + kernel_maps__fprintf(stderr); } return err; @@ -1355,8 +1339,8 @@ void dsos__fprintf(FILE *fp) int load_kernel(unsigned int sym_priv_size, symbol_filter_t filter) { - if (dsos__load_kernel(vmlinux_name, sym_priv_size, - filter, verbose, modules) <= 0) + if (dsos__load_kernel(vmlinux_name, sym_priv_size, filter, + modules) <= 0) return -1; vdso = dso__new("[vdso]", 0); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index c2a777de9b7e..77b7b3e42417 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -63,11 +63,10 @@ static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) struct symbol *dso__find_symbol(struct dso *self, u64 ip); int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, - symbol_filter_t filter, int verbose, int modules); + symbol_filter_t filter, int modules); struct dso *dsos__findnew(const char *name, unsigned int sym_priv_size, bool *is_new); -int dso__load(struct dso *self, struct map *map, - symbol_filter_t filter, int v); +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); void dsos__fprintf(FILE *fp); size_t dso__fprintf(struct dso *self, FILE *fp); diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 8cb47f1d8a76..0f6d78c9863a 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -127,9 +127,9 @@ static void thread__remove_overlappings(struct thread *self, struct map *map) continue; if (verbose >= 2) { - printf("overlapping maps:\n"); - map__fprintf(map, stdout); - map__fprintf(pos, stdout); + fputs("overlapping maps:\n", stderr); + map__fprintf(map, stderr); + map__fprintf(pos, stderr); } rb_erase(&pos->rb_node, &self->maps); -- cgit v1.2.3
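
For reference, a minimal, self-contained sketch of the leveled debug-message scheme the last patch above introduces (the eprintf() and pr_*() names mirror the patch; the pr_fmt() prefix hook is omitted and the main() driver is purely illustrative):

	#include <stdarg.h>
	#include <stdio.h>

	static int verbose;	/* bumped once per -v on the command line */

	/* Print to stderr only when the verbosity reaches 'level'. */
	static int eprintf(int level, const char *fmt, ...)
	{
		va_list args;
		int ret = 0;

		if (verbose >= level) {
			va_start(args, fmt);
			ret = vfprintf(stderr, fmt, args);
			va_end(args);
		}
		return ret;
	}

	/* Errors and warnings are always emitted... */
	#define pr_err(fmt, ...)	fprintf(stderr, fmt, ##__VA_ARGS__)
	#define pr_warning(fmt, ...)	fprintf(stderr, fmt, ##__VA_ARGS__)
	/* ...debug messages are gated on the verbosity level. */
	#define pr_debug(fmt, ...)	eprintf(1, fmt, ##__VA_ARGS__)
	#define pr_debug2(fmt, ...)	eprintf(2, fmt, ##__VA_ARGS__)

	int main(void)
	{
		verbose = 1;	/* as if perf was run with a single -v */
		pr_err("always printed\n");
		pr_debug("printed at -v and above\n");
		pr_debug2("printed only at -vv and above\n");
		return 0;
	}

The point of the conversion is that each call site now encodes its own threshold (pr_debug for -v, pr_debug2 for -vv, and so on) against the single global verbose counter, so the old habit of threading an extra 'int v' argument through dso__load(), map__new() and friends can be dropped.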