-rw-r--r--  Documentation/kernel-parameters.txt | 7
-rw-r--r--  Documentation/trace/ftrace-design.txt | 13
-rw-r--r--  arch/s390/kernel/ftrace.c | 67
-rw-r--r--  arch/x86/include/asm/perf_event.h | 13
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 185
-rw-r--r--  arch/x86/kernel/entry_32.S | 7
-rw-r--r--  arch/x86/kernel/entry_64.S | 6
-rw-r--r--  arch/x86/kernel/ftrace.c | 84
-rw-r--r--  arch/x86/mm/testmmiotrace.c | 29
-rw-r--r--  include/linux/ftrace_event.h | 11
-rw-r--r--  include/linux/perf_counter.h | 1
-rw-r--r--  include/linux/perf_event.h | 6
-rw-r--r--  include/linux/smp_lock.h | 21
-rw-r--r--  include/trace/events/bkl.h | 61
-rw-r--r--  include/trace/events/irq.h | 8
-rw-r--r--  include/trace/events/power.h | 2
-rw-r--r--  include/trace/events/sched.h | 44
-rw-r--r--  include/trace/events/timer.h | 79
-rw-r--r--  include/trace/ftrace.h | 15
-rw-r--r--  include/trace/syscall.h | 2
-rw-r--r--  kernel/perf_event.c | 82
-rw-r--r--  kernel/trace/ftrace.c | 370
-rw-r--r--  kernel/trace/ring_buffer.c | 15
-rw-r--r--  kernel/trace/trace.c | 4
-rw-r--r--  kernel/trace/trace.h | 48
-rw-r--r--  kernel/trace/trace_events.c | 47
-rw-r--r--  kernel/trace/trace_events_filter.c | 423
-rw-r--r--  kernel/trace/trace_export.c | 25
-rw-r--r--  kernel/trace/trace_syscalls.c | 106
-rw-r--r--  lib/kernel_lock.c | 20
-rwxr-xr-x  scripts/recordmcount.pl | 1
-rw-r--r--  tools/perf/Documentation/perf-timechart.txt | 5
-rw-r--r--  tools/perf/Makefile | 58
-rw-r--r--  tools/perf/builtin-annotate.c | 645
-rw-r--r--  tools/perf/builtin-record.c | 93
-rw-r--r--  tools/perf/builtin-report.c | 961
-rw-r--r--  tools/perf/builtin-sched.c | 310
-rw-r--r--  tools/perf/builtin-stat.c | 31
-rw-r--r--  tools/perf/builtin-timechart.c | 146
-rw-r--r--  tools/perf/builtin-top.c | 173
-rw-r--r--  tools/perf/builtin-trace.c | 174
-rw-r--r--  tools/perf/perf.c | 16
-rwxr-xr-x  tools/perf/util/PERF-VERSION-GEN | 2
-rw-r--r--  tools/perf/util/cache.h | 11
-rw-r--r--  tools/perf/util/callchain.c | 2
-rw-r--r--  tools/perf/util/callchain.h | 2
-rw-r--r--  tools/perf/util/color.h | 6
-rw-r--r--  tools/perf/util/data_map.c | 222
-rw-r--r--  tools/perf/util/data_map.h | 31
-rw-r--r--  tools/perf/util/debug.c | 4
-rw-r--r--  tools/perf/util/debug.h | 7
-rw-r--r--  tools/perf/util/event.h | 30
-rw-r--r--  tools/perf/util/exec_cmd.h | 6
-rw-r--r--  tools/perf/util/header.c | 74
-rw-r--r--  tools/perf/util/header.h | 29
-rw-r--r--  tools/perf/util/help.h | 6
-rw-r--r--  tools/perf/util/hist.c | 210
-rw-r--r--  tools/perf/util/hist.h | 50
-rw-r--r--  tools/perf/util/include/asm/asm-offsets.h | 1
-rw-r--r--  tools/perf/util/include/asm/bitops.h | 18
-rw-r--r--  tools/perf/util/include/asm/byteorder.h | 2
-rw-r--r--  tools/perf/util/include/asm/swab.h | 1
-rw-r--r--  tools/perf/util/include/asm/uaccess.h | 14
-rw-r--r--  tools/perf/util/include/linux/bitmap.h | 2
-rw-r--r--  tools/perf/util/include/linux/bitops.h | 27
-rw-r--r--  tools/perf/util/include/linux/compiler.h | 10
-rw-r--r--  tools/perf/util/include/linux/ctype.h | 1
-rw-r--r--  tools/perf/util/include/linux/kernel.h | 76
-rw-r--r--  tools/perf/util/include/linux/string.h | 1
-rw-r--r--  tools/perf/util/include/linux/types.h | 9
-rw-r--r--  tools/perf/util/levenshtein.h | 6
-rw-r--r--  tools/perf/util/map.c | 26
-rw-r--r--  tools/perf/util/module.c | 545
-rw-r--r--  tools/perf/util/module.h | 53
-rw-r--r--  tools/perf/util/parse-events.c | 26
-rw-r--r--  tools/perf/util/parse-events.h | 8
-rw-r--r--  tools/perf/util/parse-options.h | 6
-rw-r--r--  tools/perf/util/quote.h | 6
-rw-r--r--  tools/perf/util/run-command.h | 6
-rw-r--r--  tools/perf/util/sigchain.h | 6
-rw-r--r--  tools/perf/util/sort.c | 290
-rw-r--r--  tools/perf/util/sort.h | 99
-rw-r--r--  tools/perf/util/strbuf.h | 6
-rw-r--r--  tools/perf/util/string.c | 11
-rw-r--r--  tools/perf/util/string.h | 7
-rw-r--r--  tools/perf/util/strlist.h | 6
-rw-r--r--  tools/perf/util/svghelper.c | 2
-rw-r--r--  tools/perf/util/svghelper.h | 6
-rw-r--r--  tools/perf/util/symbol.c | 785
-rw-r--r--  tools/perf/util/symbol.h | 34
-rw-r--r--  tools/perf/util/thread.c | 167
-rw-r--r--  tools/perf/util/thread.h | 31
-rw-r--r--  tools/perf/util/trace-event-info.c | 6
-rw-r--r--  tools/perf/util/trace-event-parse.c | 537
-rw-r--r--  tools/perf/util/trace-event-read.c | 7
-rw-r--r--  tools/perf/util/trace-event.h | 41
-rw-r--r--  tools/perf/util/types.h | 6
-rw-r--r--  tools/perf/util/values.h | 6
98 files changed, 4475 insertions(+), 3528 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9107b387e91f..c8d1b2be28ef 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -779,6 +779,13 @@ and is between 256 and 4096 characters. It is defined in the file
by the set_ftrace_notrace file in the debugfs
tracing directory.
+ ftrace_graph_filter=[function-list]
+ [FTRACE] Limit the top-level caller functions traced
+ by the function graph tracer at boot up.
+ function-list is a comma-separated list of functions
+ that can be changed at run time by the
+ set_graph_function file in the debugfs tracing directory.
+
gamecon.map[2|3]=
[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
support via parallel port (up to 5 devices per port)
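
A usage sketch for the parameter documented above (the function names are purely illustrative): booting with

    ftrace_graph_filter=do_fork,kthread_create

limits the function graph tracer to those top-level callers, and the list can be changed later at run time via the set_graph_function file, e.g.

    echo do_fork > /sys/kernel/debug/tracing/set_graph_function

assuming debugfs is mounted at /sys/kernel/debug.
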
diff --git a/Documentation/trace/ftrace-design.txt b/Documentation/trace/ftrace-design.txt
index 7003e10f10f5..641a1ef2a7ff 100644
--- a/Documentation/trace/ftrace-design.txt
+++ b/Documentation/trace/ftrace-design.txt
@@ -213,10 +213,19 @@ If you can't trace NMI functions, then skip this option.
<details to be filled>
-HAVE_FTRACE_SYSCALLS
+HAVE_SYSCALL_TRACEPOINTS
---------------------
-<details to be filled>
+You need very few things to get syscall tracing working on an arch.
+
+- Have a NR_syscalls variable in <asm/unistd.h> that provides the number
+  of syscalls supported by the arch.
+- Implement arch_syscall_addr() that resolves a syscall address from a
+  syscall number.
+- Support the TIF_SYSCALL_TRACEPOINT thread flag.
+- Put the trace_sys_enter() and trace_sys_exit() tracepoint calls in the
+  ptrace syscall tracing path.
+- Tag this arch as HAVE_SYSCALL_TRACEPOINTS.
HAVE_FTRACE_MCOUNT_RECORD
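
A minimal sketch of the per-arch hook named in the HAVE_SYSCALL_TRACEPOINTS list above, assuming the arch keeps a flat table of handler addresses indexed by syscall number (the s390 and x86 hunks below implement the same idea against their real syscall tables):

    #include <linux/init.h>

    /* Hypothetical arch code: sys_call_table is assumed to be an array of
     * handler addresses indexed by syscall number. */
    extern const unsigned long sys_call_table[];

    unsigned long __init arch_syscall_addr(int nr)
    {
            return sys_call_table[nr];
    }
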
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index f5fe34dd821b..5a82bc68193e 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -203,73 +203,10 @@ out:
#ifdef CONFIG_FTRACE_SYSCALLS
-extern unsigned long __start_syscalls_metadata[];
-extern unsigned long __stop_syscalls_metadata[];
extern unsigned int sys_call_table[];
-static struct syscall_metadata **syscalls_metadata;
-
-struct syscall_metadata *syscall_nr_to_meta(int nr)
-{
- if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
- return NULL;
-
- return syscalls_metadata[nr];
-}
-
-int syscall_name_to_nr(char *name)
-{
- int i;
-
- if (!syscalls_metadata)
- return -1;
- for (i = 0; i < NR_syscalls; i++)
- if (syscalls_metadata[i])
- if (!strcmp(syscalls_metadata[i]->name, name))
- return i;
- return -1;
-}
-
-void set_syscall_enter_id(int num, int id)
-{
- syscalls_metadata[num]->enter_id = id;
-}
-
-void set_syscall_exit_id(int num, int id)
+unsigned long __init arch_syscall_addr(int nr)
{
- syscalls_metadata[num]->exit_id = id;
-}
-
-static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
-{
- struct syscall_metadata *start;
- struct syscall_metadata *stop;
- char str[KSYM_SYMBOL_LEN];
-
- start = (struct syscall_metadata *)__start_syscalls_metadata;
- stop = (struct syscall_metadata *)__stop_syscalls_metadata;
- kallsyms_lookup(syscall, NULL, NULL, NULL, str);
-
- for ( ; start < stop; start++) {
- if (start->name && !strcmp(start->name + 3, str + 3))
- return start;
- }
- return NULL;
-}
-
-static int __init arch_init_ftrace_syscalls(void)
-{
- struct syscall_metadata *meta;
- int i;
- syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
- GFP_KERNEL);
- if (!syscalls_metadata)
- return -ENOMEM;
- for (i = 0; i < NR_syscalls; i++) {
- meta = find_syscall_meta((unsigned long)sys_call_table[i]);
- syscalls_metadata[i] = meta;
- }
- return 0;
+ return (unsigned long)sys_call_table[nr];
}
-arch_initcall(arch_init_ftrace_syscalls);
#endif
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index ad7ce3fd5065..8d9f8548a870 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -28,9 +28,20 @@
*/
#define ARCH_PERFMON_EVENT_MASK 0xffff
+/*
+ * Filter mask used to validate fixed counter events.
+ * The following filters disqualify an event from using a fixed counter:
+ * - inv
+ * - edge
+ * - cnt-mask
+ * The other filters are supported by fixed counters.
+ * The any-thread option is supported starting with v3.
+ */
+#define ARCH_PERFMON_EVENT_FILTER_MASK 0xff840000
+
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b5801c311846..2e20bca3cca1 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -77,6 +77,18 @@ struct cpu_hw_events {
struct debug_store *ds;
};
+struct event_constraint {
+ unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ int code;
+};
+
+#define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) }
+#define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 }
+
+#define for_each_event_constraint(e, c) \
+ for ((e) = (c); (e)->idxmsk[0]; (e)++)
+
+
/*
* struct x86_pmu - generic x86 pmu
*/
@@ -102,6 +114,8 @@ struct x86_pmu {
u64 intel_ctrl;
void (*enable_bts)(u64 config);
void (*disable_bts)(void);
+ int (*get_event_idx)(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *hwc);
};
static struct x86_pmu x86_pmu __read_mostly;
@@ -110,6 +124,8 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};
+static const struct event_constraint *event_constraints;
+
/*
* Not sure about some of these
*/
@@ -155,6 +171,16 @@ static u64 p6_pmu_raw_event(u64 hw_event)
return hw_event & P6_EVNTSEL_MASK;
}
+static const struct event_constraint intel_p6_event_constraints[] =
+{
+ EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
+ EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
+ EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
+ EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
+ EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
+ EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
+ EVENT_CONSTRAINT_END
+};
/*
* Intel PerfMon v3. Used on Core2 and later.
@@ -170,6 +196,35 @@ static const u64 intel_perfmon_event_map[] =
[PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
};
+static const struct event_constraint intel_core_event_constraints[] =
+{
+ EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
+ EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
+ EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
+ EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
+ EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
+ EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
+ EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
+ EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
+ EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
+ EVENT_CONSTRAINT_END
+};
+
+static const struct event_constraint intel_nehalem_event_constraints[] =
+{
+ EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
+ EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
+ EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
+ EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
+ EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
+ EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
+ EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
+ EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
+ EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
+ EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
+ EVENT_CONSTRAINT_END
+};
+
static u64 intel_pmu_event_map(int hw_event)
{
return intel_perfmon_event_map[hw_event];
@@ -469,7 +524,7 @@ static u64 intel_pmu_raw_event(u64 hw_event)
#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
-#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
+#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
#define CORE_EVNTSEL_MASK \
(CORE_EVNTSEL_EVENT_MASK | \
@@ -932,6 +987,8 @@ static int __hw_perf_event_init(struct perf_event *event)
*/
hwc->config = ARCH_PERFMON_EVENTSEL_INT;
+ hwc->idx = -1;
+
/*
* Count user and OS events unless requested not to.
*/
@@ -1334,8 +1391,7 @@ static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
x86_pmu_enable_event(hwc, idx);
}
-static int
-fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
+static int fixed_mode_idx(struct hw_perf_event *hwc)
{
unsigned int hw_event;
@@ -1349,6 +1405,12 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
if (!x86_pmu.num_events_fixed)
return -1;
+ /*
+ * fixed counters do not take all possible filters
+ */
+ if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK)
+ return -1;
+
if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
return X86_PMC_IDX_FIXED_INSTRUCTIONS;
if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
@@ -1360,22 +1422,57 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
}
/*
- * Find a PMC slot for the freshly enabled / scheduled in event:
+ * generic counter allocator: get next free counter
*/
-static int x86_pmu_enable(struct perf_event *event)
+static int
+gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+{
+ int idx;
+
+ idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
+ return idx == x86_pmu.num_events ? -1 : idx;
+}
+
+/*
+ * intel-specific counter allocator: check event constraints
+ */
+static int
+intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+{
+ const struct event_constraint *event_constraint;
+ int i, code;
+
+ if (!event_constraints)
+ goto skip;
+
+ code = hwc->config & CORE_EVNTSEL_EVENT_MASK;
+
+ for_each_event_constraint(event_constraint, event_constraints) {
+ if (code == event_constraint->code) {
+ for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
+ if (!test_and_set_bit(i, cpuc->used_mask))
+ return i;
+ }
+ return -1;
+ }
+ }
+skip:
+ return gen_get_event_idx(cpuc, hwc);
+}
+
+static int
+x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
int idx;
- idx = fixed_mode_idx(event, hwc);
+ idx = fixed_mode_idx(hwc);
if (idx == X86_PMC_IDX_FIXED_BTS) {
/* BTS is already occupied. */
if (test_and_set_bit(idx, cpuc->used_mask))
return -EAGAIN;
hwc->config_base = 0;
- hwc->event_base = 0;
+ hwc->event_base = 0;
hwc->idx = idx;
} else if (idx >= 0) {
/*
@@ -1396,20 +1493,35 @@ static int x86_pmu_enable(struct perf_event *event)
} else {
idx = hwc->idx;
/* Try to get the previous generic event again */
- if (test_and_set_bit(idx, cpuc->used_mask)) {
+ if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
- idx = find_first_zero_bit(cpuc->used_mask,
- x86_pmu.num_events);
- if (idx == x86_pmu.num_events)
+ idx = x86_pmu.get_event_idx(cpuc, hwc);
+ if (idx == -1)
return -EAGAIN;
set_bit(idx, cpuc->used_mask);
hwc->idx = idx;
}
- hwc->config_base = x86_pmu.eventsel;
- hwc->event_base = x86_pmu.perfctr;
+ hwc->config_base = x86_pmu.eventsel;
+ hwc->event_base = x86_pmu.perfctr;
}
+ return idx;
+}
+
+/*
+ * Find a PMC slot for the freshly enabled / scheduled in event:
+ */
+static int x86_pmu_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ idx = x86_schedule_event(cpuc, hwc);
+ if (idx < 0)
+ return idx;
+
perf_events_lapic_init();
x86_pmu.disable(hwc, idx);
@@ -1877,6 +1989,7 @@ static struct x86_pmu p6_pmu = {
*/
.event_bits = 32,
.event_mask = (1ULL << 32) - 1,
+ .get_event_idx = intel_get_event_idx,
};
static struct x86_pmu intel_pmu = {
@@ -1900,6 +2013,7 @@ static struct x86_pmu intel_pmu = {
.max_period = (1ULL << 31) - 1,
.enable_bts = intel_pmu_enable_bts,
.disable_bts = intel_pmu_disable_bts,
+ .get_event_idx = intel_get_event_idx,
};
static struct x86_pmu amd_pmu = {
@@ -1920,6 +2034,7 @@ static struct x86_pmu amd_pmu = {
.apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
+ .get_event_idx = gen_get_event_idx,
};
static int p6_pmu_init(void)
@@ -1932,10 +2047,12 @@ static int p6_pmu_init(void)
case 7:
case 8:
case 11: /* Pentium III */
+ event_constraints = intel_p6_event_constraints;
break;
case 9:
case 13:
/* Pentium M */
+ event_constraints = intel_p6_event_constraints;
break;
default:
pr_cont("unsupported p6 CPU model %d ",
@@ -2007,12 +2124,14 @@ static int intel_pmu_init(void)
sizeof(hw_cache_event_ids));
pr_cont("Core2 events, ");
+ event_constraints = intel_core_event_constraints;
break;
default:
case 26:
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
+ event_constraints = intel_nehalem_event_constraints;
pr_cont("Nehalem/Corei7 events, ");
break;
case 28:
@@ -2105,11 +2224,47 @@ static const struct pmu pmu = {
.unthrottle = x86_pmu_unthrottle,
};
+static int
+validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+ struct hw_perf_event fake_event = event->hw;
+
+ if (event->pmu != &pmu)
+ return 0;
+
+ return x86_schedule_event(cpuc, &fake_event);
+}
+
+static int validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct cpu_hw_events fake_pmu;
+
+ memset(&fake_pmu, 0, sizeof(fake_pmu));
+
+ if (!validate_event(&fake_pmu, leader))
+ return -ENOSPC;
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (!validate_event(&fake_pmu, sibling))
+ return -ENOSPC;
+ }
+
+ if (!validate_event(&fake_pmu, event))
+ return -ENOSPC;
+
+ return 0;
+}
+
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
int err;
err = __hw_perf_event_init(event);
+ if (!err) {
+ if (event->group_leader != event)
+ err = validate_group(event);
+ }
if (err) {
if (event->destroy)
event->destroy(event);
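
The constraint handling added above can be modelled in a few lines of plain C. This is a simplified, self-contained sketch (not the kernel code; the table values are illustrative) of how an event code is matched against a constraint table and the first permitted, still-free counter is chosen:

    #include <stdio.h>

    #define NUM_COUNTERS 4

    struct event_constraint {
            unsigned int code;      /* event select code, 0 terminates the table */
            unsigned int idxmsk;    /* bitmask of counters this event may use */
    };

    /* Example table in the spirit of intel_core_event_constraints. */
    static const struct event_constraint constraints[] = {
            { 0x12, 0x2 },          /* MUL: counter 1 only */
            { 0xcb, 0x1 },          /* MEM_LOAD_RETIRED: counter 0 only */
            { 0, 0 },
    };

    /* Pick a counter for 'code', honouring constraints; -1 if none is free. */
    static int get_event_idx(unsigned int *used_mask, unsigned int code)
    {
            const struct event_constraint *c;
            int i;

            for (c = constraints; c->idxmsk; c++) {
                    if (c->code != code)
                            continue;
                    for (i = 0; i < NUM_COUNTERS; i++) {
                            if ((c->idxmsk & (1u << i)) && !(*used_mask & (1u << i))) {
                                    *used_mask |= 1u << i;
                                    return i;
                            }
                    }
                    return -1;      /* all counters allowed for this event are busy */
            }
            /* unconstrained event: take the first free counter */
            for (i = 0; i < NUM_COUNTERS; i++) {
                    if (!(*used_mask & (1u << i))) {
                            *used_mask |= 1u << i;
                            return i;
                    }
            }
            return -1;
    }

    int main(void)
    {
            unsigned int used = 0;

            printf("MUL  -> counter %d\n", get_event_idx(&used, 0x12));
            printf("MUL  -> counter %d\n", get_event_idx(&used, 0x12)); /* -1: counter 1 taken */
            printf("MISC -> counter %d\n", get_event_idx(&used, 0x3c));
            return 0;
    }
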
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index beb30da203d6..50b9c220e121 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1209,17 +1209,14 @@ END(ftrace_graph_caller)
.globl return_to_handler
return_to_handler:
- pushl $0
pushl %eax
- pushl %ecx
pushl %edx
movl %ebp, %eax
call ftrace_return_to_handler
- movl %eax, 0xc(%esp)
+ movl %eax, %ecx
popl %edx
- popl %ecx
popl %eax
- ret
+ jmp *%ecx
#endif
.section .rodata,"a"
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 42a0b2cbf2e1..722df1b1152d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -155,11 +155,11 @@ GLOBAL(return_to_handler)
call ftrace_return_to_handler
- movq %rax, 16(%rsp)
+ movq %rax, %rdi
movq 8(%rsp), %rdx
movq (%rsp), %rax
- addq $16, %rsp
- retq
+ addq $24, %rsp
+ jmp *%rdi
#endif
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 9dbb527e1652..5a1b9758fd62 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -9,6 +9,8 @@
* the dangers of modifying code on the run.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
@@ -336,15 +338,15 @@ int __init ftrace_dyn_arch_init(void *data)
switch (faulted) {
case 0:
- pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
+ pr_info("converting mcount calls to 0f 1f 44 00 00\n");
memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
break;
case 1:
- pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
+ pr_info("converting mcount calls to 66 66 66 66 90\n");
memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
break;
case 2:
- pr_info("ftrace: converting mcount calls to jmp . + 5\n");
+ pr_info("converting mcount calls to jmp . + 5\n");
memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
break;
}
@@ -468,82 +470,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
#ifdef CONFIG_FTRACE_SYSCALLS
-extern unsigned long __start_syscalls_metadata[];
-extern unsigned long __stop_syscalls_metadata[];
extern unsigned long *sys_call_table;
-static struct syscall_metadata **syscalls_metadata;
-
-static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
-{
- struct syscall_metadata *start;
- struct syscall_metadata *stop;
- char str[KSYM_SYMBOL_LEN];
-
-
- start = (struct syscall_metadata *)__start_syscalls_metadata;
- stop = (struct syscall_metadata *)__stop_syscalls_metadata;
- kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
-
- for ( ; start < stop; start++) {
- if (start->name && !strcmp(start->name, str))
- return start;
- }
- return NULL;
-}
-
-struct syscall_metadata *syscall_nr_to_meta(int nr)
-{
- if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
- return NULL;
-
- return syscalls_metadata[nr];
-}
-
-int syscall_name_to_nr(char *name)
+unsigned long __init arch_syscall_addr(int nr)
{
- int i;
-
- if (!syscalls_metadata)
- return -1;
-
- for (i = 0; i < NR_syscalls; i++) {
- if (syscalls_metadata[i]) {
- if (!strcmp(syscalls_metadata[i]->name, name))
- return i;
- }
- }
- return -1;
-}
-
-void set_syscall_enter_id(int num, int id)
-{
- syscalls_metadata[num]->enter_id = id;
-}
-
-void set_syscall_exit_id(int num, int id)
-{
- syscalls_metadata[num]->exit_id = id;
-}
-
-static int __init arch_init_ftrace_syscalls(void)
-{
- int i;
- struct syscall_metadata *meta;
- unsigned long **psys_syscall_table = &sys_call_table;
-
- syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
- NR_syscalls, GFP_KERNEL);
- if (!syscalls_metadata) {
- WARN_ON(1);
- return -ENOMEM;
- }
-
- for (i = 0; i < NR_syscalls; i++) {
- meta = find_syscall_meta(psys_syscall_table[i]);
- syscalls_metadata[i] = meta;
- }
- return 0;
+ return (unsigned long)(&sys_call_table)[nr];
}
-arch_initcall(arch_init_ftrace_syscalls);
#endif
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index 427fd1b56df5..8565d944f7cf 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -1,12 +1,13 @@
/*
* Written by Pekka Paalanen, 2008-2009 <pq@iki.fi>
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mmiotrace.h>
-#define MODULE_NAME "testmmiotrace"
-
static unsigned long mmio_address;
module_param(mmio_address, ulong, 0);
MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
@@ -30,7 +31,7 @@ static unsigned v32(unsigned i)
static void do_write_test(void __iomem *p)
{
unsigned int i;
- pr_info(MODULE_NAME ": write test.\n");
+ pr_info("write test.\n");
mmiotrace_printk("Write test.\n");
for (i = 0; i < 256; i++)
@@ -47,7 +48,7 @@ static void do_read_test(void __iomem *p)
{
unsigned int i;
unsigned errs[3] = { 0 };
- pr_info(MODULE_NAME ": read test.\n");
+ pr_info("read test.\n");
mmiotrace_printk("Read test.\n");
for (i = 0; i < 256; i++)
@@ -68,7 +69,7 @@ static void do_read_test(void __iomem *p)
static void do_read_far_test(void __iomem *p)
{
- pr_info(MODULE_NAME ": read far test.\n");
+ pr_info("read far test.\n");
mmiotrace_printk("Read far test.\n");
ioread32(p + read_far);
@@ -78,7 +79,7 @@ static void do_test(unsigned long size)
{
void __iomem *p = ioremap_nocache(mmio_address, size);
if (!p) {
- pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
+ pr_err("could not ioremap, aborting.\n");
return;
}
mmiotrace_printk("ioremap returned %p.\n", p);
@@ -94,24 +95,22 @@ static int __init init(void)
unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
if (mmio_address == 0) {
- pr_err(MODULE_NAME ": you have to use the module argument "
- "mmio_address.\n");
- pr_err(MODULE_NAME ": DO NOT LOAD THIS MODULE UNLESS"
- " YOU REALLY KNOW WHAT YOU ARE DOING!\n");
+ pr_err("you have to use the module argument mmio_address.\n");
+ pr_err("DO NOT LOAD THIS MODULE UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!\n");
return -ENXIO;
}
- pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI "
- "address space, and writing 16 kB of rubbish in there.\n",
- size >> 10, mmio_address);
+ pr_warning("WARNING: mapping %lu kB @ 0x%08lx in PCI address space, "
+ "and writing 16 kB of rubbish in there.\n",
+ size >> 10, mmio_address);
do_test(size);
- pr_info(MODULE_NAME ": All done.\n");
+ pr_info("All done.\n");
return 0;
}
static void __exit cleanup(void)
{
- pr_debug(MODULE_NAME ": unloaded.\n");
+ pr_debug("unloaded.\n");
}
module_init(init);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 3451c55acb59..f7b47c336703 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -144,7 +144,7 @@ extern char *trace_profile_buf_nmi;
#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
extern void destroy_preds(struct ftrace_event_call *call);
-extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
+extern int filter_match_preds(struct event_filter *filter, void *rec);
extern int filter_current_check_discard(struct ring_buffer *buffer,
struct ftrace_event_call *call,
void *rec,
@@ -187,4 +187,13 @@ do { \
__trace_printk(ip, fmt, ##args); \
} while (0)
+#ifdef CONFIG_EVENT_PROFILE
+struct perf_event;
+extern int ftrace_profile_enable(int event_id);
+extern void ftrace_profile_disable(int event_id);
+extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
+ char *filter_str);
+extern void ftrace_profile_free_filter(struct perf_event *event);
+#endif
+
#endif /* _LINUX_FTRACE_EVENT_H */
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 7b7fbf433cff..91a2b4309e7a 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -225,6 +225,7 @@ struct perf_counter_attr {
#define PERF_COUNTER_IOC_RESET _IO ('$', 3)
#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
#define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5)
+#define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *)
enum perf_counter_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2e6d95f97419..df9d964c15fc 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -221,6 +221,7 @@ struct perf_event_attr {
#define PERF_EVENT_IOC_RESET _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64)
#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -633,7 +634,12 @@ struct perf_event {
struct pid_namespace *ns;
u64 id;
+
+#ifdef CONFIG_EVENT_PROFILE
+ struct event_filter *filter;
#endif
+
+#endif /* CONFIG_PERF_EVENTS */
};
/**
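
A user-space sketch of the new ioctl. It assumes a kernel carrying this patch, a perf_event_open() syscall exposed as __NR_perf_event_open, and a tracepoint event id (the value 42 is illustrative; the real id comes from the event's debugfs "id" file); the filter string uses the common_pid field that every trace event carries:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
            struct perf_event_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type   = PERF_TYPE_TRACEPOINT;
            attr.size   = sizeof(attr);
            attr.config = 42;       /* illustrative tracepoint event id */

            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            /* Restrict the tracepoint samples with an event filter string. */
            if (ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0") < 0)
                    perror("PERF_EVENT_IOC_SET_FILTER");

            close(fd);
            return 0;
    }
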
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 813be59bf345..2ea1dd1ba21c 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -24,8 +24,21 @@ static inline int reacquire_kernel_lock(struct task_struct *task)
return 0;
}
-extern void __lockfunc lock_kernel(void) __acquires(kernel_lock);
-extern void __lockfunc unlock_kernel(void) __releases(kernel_lock);
+extern void __lockfunc
+_lock_kernel(const char *func, const char *file, int line)
+__acquires(kernel_lock);
+
+extern void __lockfunc
+_unlock_kernel(const char *func, const char *file, int line)
+__releases(kernel_lock);
+
+#define lock_kernel() do { \
+ _lock_kernel(__func__, __FILE__, __LINE__); \
+} while (0)
+
+#define unlock_kernel() do { \
+ _unlock_kernel(__func__, __FILE__, __LINE__); \
+} while (0)
/*
* Various legacy drivers don't really need the BKL in a specific
@@ -41,8 +54,8 @@ static inline void cycle_kernel_lock(void)
#else
-#define lock_kernel() do { } while(0)
-#define unlock_kernel() do { } while(0)
+#define lock_kernel()
+#define unlock_kernel()
#define release_kernel_lock(task) do { } while(0)
#define cycle_kernel_lock() do { } while(0)
#define reacquire_kernel_lock(task) 0
diff --git a/include/trace/events/bkl.h b/include/trace/events/bkl.h
new file mode 100644
index 000000000000..1af72dc24278
--- /dev/null
+++ b/include/trace/events/bkl.h
@@ -0,0 +1,61 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bkl
+
+#if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BKL_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(lock_kernel,
+
+ TP_PROTO(const char *func, const char *file, int line),
+
+ TP_ARGS(func, file, line),
+
+ TP_STRUCT__entry(
+ __field( int, depth )
+ __field_ext( const char *, func, FILTER_PTR_STRING )
+ __field_ext( const char *, file, FILTER_PTR_STRING )
+ __field( int, line )
+ ),
+
+ TP_fast_assign(
+ /* We want to record the lock_depth after lock is acquired */
+ __entry->depth = current->lock_depth + 1;
+ __entry->func = func;
+ __entry->file = file;
+ __entry->line = line;
+ ),
+
+ TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth,
+ __entry->file, __entry->line, __entry->func)
+);
+
+TRACE_EVENT(unlock_kernel,
+
+ TP_PROTO(const char *func, const char *file, int line),
+
+ TP_ARGS(func, file, line),
+
+ TP_STRUCT__entry(
+ __field(int, depth )
+ __field(const char *, func )
+ __field(const char *, file )
+ __field(int, line )
+ ),
+
+ TP_fast_assign(
+ __entry->depth = current->lock_depth;
+ __entry->func = func;
+ __entry->file = file;
+ __entry->line = line;
+ ),
+
+ TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth,
+ __entry->file, __entry->line, __entry->func)
+);
+
+#endif /* _TRACE_BKL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
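
The two TRACE_EVENT() definitions above generate trace_lock_kernel() and trace_unlock_kernel() hooks. A simplified sketch of how the reworked lib/kernel_lock.c entry points (whose diff is not included in this excerpt) are expected to call them, with __lock_kernel()/__unlock_kernel() standing in for the existing BKL helpers:

    #define CREATE_TRACE_POINTS
    #include <trace/events/bkl.h>

    void __lockfunc _lock_kernel(const char *func, const char *file, int line)
    {
            trace_lock_kernel(func, file, line);    /* emit bkl:lock_kernel */
            __lock_kernel();                        /* assumption: existing acquire helper */
    }

    void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
    {
            trace_unlock_kernel(func, file, line);  /* emit bkl:unlock_kernel */
            __unlock_kernel();                      /* assumption: existing release helper */
    }
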
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index b89f9db4a404..dcfcd4407623 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -48,7 +48,7 @@ TRACE_EVENT(irq_handler_entry,
__assign_str(name, action->name);
),
- TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name))
+ TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
);
/**
@@ -78,7 +78,7 @@ TRACE_EVENT(irq_handler_exit,
__entry->ret = ret;
),
- TP_printk("irq=%d return=%s",
+ TP_printk("irq=%d ret=%s",
__entry->irq, __entry->ret ? "handled" : "unhandled")
);
@@ -107,7 +107,7 @@ TRACE_EVENT(softirq_entry,
__entry->vec = (int)(h - vec);
),
- TP_printk("softirq=%d action=%s", __entry->vec,
+ TP_printk("vec=%d [action=%s]", __entry->vec,
show_softirq_name(__entry->vec))
);
@@ -136,7 +136,7 @@ TRACE_EVENT(softirq_exit,
__entry->vec = (int)(h - vec);
),
- TP_printk("softirq=%d action=%s", __entry->vec,
+ TP_printk("vec=%d [action=%s]", __entry->vec,
show_softirq_name(__entry->vec))
);
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index ea6d579261ad..9bb96e5a2848 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -16,8 +16,6 @@ enum {
};
#endif
-
-
TRACE_EVENT(power_start,
TP_PROTO(unsigned int type, unsigned int state),
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 4069c43f4187..b50b9856c59f 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -26,7 +26,7 @@ TRACE_EVENT(sched_kthread_stop,
__entry->pid = t->pid;
),
- TP_printk("task %s:%d", __entry->comm, __entry->pid)
+ TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
/*
@@ -46,7 +46,7 @@ TRACE_EVENT(sched_kthread_stop_ret,
__entry->ret = ret;
),
- TP_printk("ret %d", __entry->ret)
+ TP_printk("ret=%d", __entry->ret)
);
/*
@@ -73,7 +73,7 @@ TRACE_EVENT(sched_wait_task,
__entry->prio = p->prio;
),
- TP_printk("task %s:%d [%d]",
+ TP_printk("comm=%s pid=%d prio=%d",
__entry->comm, __entry->pid, __entry->prio)
);
@@ -94,7 +94,7 @@ TRACE_EVENT(sched_wakeup,
__field( pid_t, pid )
__field( int, prio )
__field( int, success )
- __field( int, cpu )
+ __field( int, target_cpu )
),
TP_fast_assign(
@@ -102,12 +102,12 @@ TRACE_EVENT(sched_wakeup,
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->success = success;
- __entry->cpu = task_cpu(p);
+ __entry->target_cpu = task_cpu(p);
),
- TP_printk("task %s:%d [%d] success=%d [%03d]",
+ TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
__entry->comm, __entry->pid, __entry->prio,
- __entry->success, __entry->cpu)
+ __entry->success, __entry->target_cpu)
);
/*
@@ -127,7 +127,7 @@ TRACE_EVENT(sched_wakeup_new,
__field( pid_t, pid )
__field( int, prio )
__field( int, success )
- __field( int, cpu )
+ __field( int, target_cpu )
),
TP_fast_assign(
@@ -135,12 +135,12 @@ TRACE_EVENT(sched_wakeup_new,
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->success = success;
- __entry->cpu = task_cpu(p);
+ __entry->target_cpu = task_cpu(p);
),
- TP_printk("task %s:%d [%d] success=%d [%03d]",
+ TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
__entry->comm, __entry->pid, __entry->prio,
- __entry->success, __entry->cpu)
+ __entry->success, __entry->target_cpu)
);
/*
@@ -176,7 +176,7 @@ TRACE_EVENT(sched_switch,
__entry->next_prio = next->prio;
),
- TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]",
+ TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
__entry->prev_state ?
__print_flags(__entry->prev_state, "|",
@@ -211,7 +211,7 @@ TRACE_EVENT(sched_migrate_task,
__entry->dest_cpu = dest_cpu;
),
- TP_printk("task %s:%d [%d] from: %d to: %d",
+ TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
__entry->comm, __entry->pid, __entry->prio,
__entry->orig_cpu, __entry->dest_cpu)
);
@@ -237,7 +237,7 @@ TRACE_EVENT(sched_process_free,
__entry->prio = p->prio;
),
- TP_printk("task %s:%d [%d]",
+ TP_printk("comm=%s pid=%d prio=%d",
__entry->comm, __entry->pid, __entry->prio)
);
@@ -262,7 +262,7 @@ TRACE_EVENT(sched_process_exit,
__entry->prio = p->prio;
),
- TP_printk("task %s:%d [%d]",
+ TP_printk("comm=%s pid=%d prio=%d",
__entry->comm, __entry->pid, __entry->prio)
);
@@ -287,7 +287,7 @@ TRACE_EVENT(sched_process_wait,
__entry->prio = current->prio;
),
- TP_printk("task %s:%d [%d]",
+ TP_printk("comm=%s pid=%d prio=%d",
__entry->comm, __entry->pid, __entry->prio)
);
@@ -314,7 +314,7 @@ TRACE_EVENT(sched_process_fork,
__entry->child_pid = child->pid;
),
- TP_printk("parent %s:%d child %s:%d",
+ TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
__entry->parent_comm, __entry->parent_pid,
__entry->child_comm, __entry->child_pid)
);
@@ -340,7 +340,7 @@ TRACE_EVENT(sched_signal_send,
__entry->sig = sig;
),
- TP_printk("sig: %d task %s:%d",
+ TP_printk("sig=%d comm=%s pid=%d",
__entry->sig, __entry->comm, __entry->pid)
);
@@ -374,7 +374,7 @@ TRACE_EVENT(sched_stat_wait,
__perf_count(delay);
),
- TP_printk("task: %s:%d wait: %Lu [ns]",
+ TP_printk("comm=%s pid=%d delay=%Lu [ns]",
__entry->comm, __entry->pid,
(unsigned long long)__entry->delay)
);
@@ -406,7 +406,7 @@ TRACE_EVENT(sched_stat_runtime,
__perf_count(runtime);
),
- TP_printk("task: %s:%d runtime: %Lu [ns], vruntime: %Lu [ns]",
+ TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
__entry->comm, __entry->pid,
(unsigned long long)__entry->runtime,
(unsigned long long)__entry->vruntime)
@@ -437,7 +437,7 @@ TRACE_EVENT(sched_stat_sleep,
__perf_count(delay);
),
- TP_printk("task: %s:%d sleep: %Lu [ns]",
+ TP_printk("comm=%s pid=%d delay=%Lu [ns]",
__entry->comm, __entry->pid,
(unsigned long long)__entry->delay)
);
@@ -467,7 +467,7 @@ TRACE_EVENT(sched_stat_iowait,
__perf_count(delay);
),
- TP_printk("task: %s:%d iowait: %Lu [ns]",
+ TP_printk("comm=%s pid=%d delay=%Lu [ns]",
__entry->comm, __entry->pid,
(unsigned long long)__entry->delay)
);
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 1844c48d640e..e5ce87a0498d 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -26,7 +26,7 @@ TRACE_EVENT(timer_init,
__entry->timer = timer;
),
- TP_printk("timer %p", __entry->timer)
+ TP_printk("timer=%p", __entry->timer)
);
/**
@@ -54,7 +54,7 @@ TRACE_EVENT(timer_start,
__entry->now = jiffies;
),
- TP_printk("timer %p: func %pf, expires %lu, timeout %ld",
+ TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
__entry->timer, __entry->function, __entry->expires,
(long)__entry->expires - __entry->now)
);
@@ -81,7 +81,7 @@ TRACE_EVENT(timer_expire_entry,
__entry->now = jiffies;
),
- TP_printk("timer %p: now %lu", __entry->timer, __entry->now)
+ TP_printk("timer=%p now=%lu", __entry->timer, __entry->now)
);
/**
@@ -108,7 +108,7 @@ TRACE_EVENT(timer_expire_exit,
__entry->timer = timer;
),
- TP_printk("timer %p", __entry->timer)
+ TP_printk("timer=%p", __entry->timer)
);
/**
@@ -129,7 +129,7 @@ TRACE_EVENT(timer_cancel,
__entry->timer = timer;
),
- TP_printk("timer %p", __entry->timer)
+ TP_printk("timer=%p", __entry->timer)
);
/**
@@ -140,24 +140,24 @@ TRACE_EVENT(timer_cancel,
*/
TRACE_EVENT(hrtimer_init,
- TP_PROTO(struct hrtimer *timer, clockid_t clockid,
+ TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
enum hrtimer_mode mode),
- TP_ARGS(timer, clockid, mode),
+ TP_ARGS(hrtimer, clockid, mode),
TP_STRUCT__entry(
- __field( void *, timer )
+ __field( void *, hrtimer )
__field( clockid_t, clockid )
__field( enum hrtimer_mode, mode )
),
TP_fast_assign(
- __entry->timer = timer;
+ __entry->hrtimer = hrtimer;
__entry->clockid = clockid;
__entry->mode = mode;
),
- TP_printk("hrtimer %p, clockid %s, mode %s", __entry->timer,
+ TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
__entry->clockid == CLOCK_REALTIME ?
"CLOCK_REALTIME" : "CLOCK_MONOTONIC",
__entry->mode == HRTIMER_MODE_ABS ?
@@ -170,26 +170,26 @@ TRACE_EVENT(hrtimer_init,
*/
TRACE_EVENT(hrtimer_start,
- TP_PROTO(struct hrtimer *timer),
+ TP_PROTO(struct hrtimer *hrtimer),
- TP_ARGS(timer),
+ TP_ARGS(hrtimer),
TP_STRUCT__entry(
- __field( void *, timer )
+ __field( void *, hrtimer )
__field( void *, function )
__field( s64, expires )
__field( s64, softexpires )
),
TP_fast_assign(
- __entry->timer = timer;
- __entry->function = timer->function;
- __entry->expires = hrtimer_get_expires(timer).tv64;
- __entry->softexpires = hrtimer_get_softexpires(timer).tv64;
+ __entry->hrtimer = hrtimer;
+ __entry->function = hrtimer->function;
+ __entry->expires = hrtimer_get_expires(hrtimer).tv64;
+ __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64;
),
- TP_printk("hrtimer %p, func %pf, expires %llu, softexpires %llu",
- __entry->timer, __entry->function,
+ TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
+ __entry->hrtimer, __entry->function,
(unsigned long long)ktime_to_ns((ktime_t) {
.tv64 = __entry->expires }),
(unsigned long long)ktime_to_ns((ktime_t) {
@@ -206,23 +206,22 @@ TRACE_EVENT(hrtimer_start,
*/
TRACE_EVENT(hrtimer_expire_entry,
- TP_PROTO(struct hrtimer *timer, ktime_t *now),
+ TP_PROTO(struct hrtimer *hrtimer, ktime_t *now),
- TP_ARGS(timer, now),
+ TP_ARGS(hrtimer, now),
TP_STRUCT__entry(
- __field( void *, timer )
+ __field( void *, hrtimer )
__field( s64, now )
),
TP_fast_assign(
- __entry->timer = timer;
- __entry->now = now->tv64;
+ __entry->hrtimer = hrtimer;
+ __entry->now = now->tv64;
),
- TP_printk("hrtimer %p, now %llu", __entry->timer,
- (unsigned long long)ktime_to_ns((ktime_t) {
- .tv64 = __entry->now }))
+ TP_printk("hrtimer=%p now=%llu", __entry->hrtimer,
+ (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
);
/**
@@ -234,40 +233,40 @@ TRACE_EVENT(hrtimer_expire_entry,
*/
TRACE_EVENT(hrtimer_expire_exit,
- TP_PROTO(struct hrtimer *timer),
+ TP_PROTO(struct hrtimer *hrtimer),
- TP_ARGS(timer),
+ TP_ARGS(hrtimer),
TP_STRUCT__entry(
- __field( void *, timer )
+ __field( void *, hrtimer )
),
TP_fast_assign(
- __entry->timer = timer;
+ __entry->hrtimer = hrtimer;
),
- TP_printk("hrtimer %p", __entry->timer)
+ TP_printk("hrtimer=%p", __entry->hrtimer)
);
/**
* hrtimer_cancel - called when the hrtimer is canceled
- * @timer: pointer to struct hrtimer
+ * @hrtimer: pointer to struct hrtimer
*/
TRACE_EVENT(hrtimer_cancel,
- TP_PROTO(struct hrtimer *timer),
+ TP_PROTO(struct hrtimer *hrtimer),
- TP_ARGS(timer),
+ TP_ARGS(hrtimer),
TP_STRUCT__entry(
- __field( void *, timer )
+ __field( void *, hrtimer )
),
TP_fast_assign(
- __entry->timer = timer;
+ __entry->hrtimer = hrtimer;
),
- TP_printk("hrtimer %p", __entry->timer)
+ TP_printk("hrtimer=%p", __entry->hrtimer)
);
/**
@@ -302,7 +301,7 @@ TRACE_EVENT(itimer_state,
__entry->interval_usec = value->it_interval.tv_usec;
),
- TP_printk("which %d, expires %lu, it_value %lu.%lu, it_interval %lu.%lu",
+ TP_printk("which=%d expires=%lu it_value=%lu.%lu it_interval=%lu.%lu",
__entry->which, __entry->expires,
__entry->value_sec, __entry->value_usec,
__entry->interval_sec, __entry->interval_usec)
@@ -332,7 +331,7 @@ TRACE_EVENT(itimer_expire,
__entry->pid = pid_nr(pid);
),
- TP_printk("which %d, pid %d, now %lu", __entry->which,
+ TP_printk("which=%d pid=%d now=%lu", __entry->which,
(int) __entry->pid, __entry->now)
);
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 54d02c06ae7e..a7f946094128 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -120,9 +120,10 @@
#undef __field
#define __field(type, item) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
- "offset:%u;\tsize:%u;\n", \
+ "offset:%u;\tsize:%u;\tsigned:%u;\n", \
(unsigned int)offsetof(typeof(field), item), \
- (unsigned int)sizeof(field.item)); \
+ (unsigned int)sizeof(field.item), \
+ (unsigned int)is_signed_type(type)); \
if (!ret) \
return 0;
@@ -132,19 +133,21 @@
#undef __array
#define __array(type, item, len) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
- "offset:%u;\tsize:%u;\n", \
+ "offset:%u;\tsize:%u;\tsigned:%u;\n", \
(unsigned int)offsetof(typeof(field), item), \
- (unsigned int)sizeof(field.item)); \
+ (unsigned int)sizeof(field.item), \
+ (unsigned int)is_signed_type(type)); \
if (!ret) \
return 0;
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
- "offset:%u;\tsize:%u;\n", \
+ "offset:%u;\tsize:%u;\tsigned:%u;\n", \
(unsigned int)offsetof(typeof(field), \
__data_loc_##item), \
- (unsigned int)sizeof(field.__data_loc_##item)); \
+ (unsigned int)sizeof(field.__data_loc_##item), \
+ (unsigned int)is_signed_type(type)); \
if (!ret) \
return 0;
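
With the change above, each field line in a trace event's debugfs format file gains a signed: attribute; an illustrative excerpt (offsets and sizes are made up for the example):

    field:pid_t pid;        offset:12;      size:4; signed:1;
    field:char comm[16];    offset:16;      size:16;        signed:1;
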
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index e290b86f6167..51ee17d3632a 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -33,7 +33,7 @@ struct syscall_metadata {
};
#ifdef CONFIG_FTRACE_SYSCALLS
-extern struct syscall_metadata *syscall_nr_to_meta(int nr);
+extern unsigned long arch_syscall_addr(int nr);
extern int syscall_name_to_nr(char *name);
void set_syscall_enter_id(int num, int id);
void set_syscall_exit_id(int num, int id);
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9d0b5c665883..9ecaa45ab6b2 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -28,6 +28,7 @@
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
+#include <linux/ftrace_event.h>
#include <asm/irq_regs.h>
@@ -1355,7 +1356,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
u64 interrupts, freq;
spin_lock(&ctx->lock);
- list_for_each_entry(event, &ctx->group_list, group_entry) {
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
@@ -1658,6 +1659,8 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
return ERR_PTR(err);
}
+static void perf_event_free_filter(struct perf_event *event);
+
static void free_event_rcu(struct rcu_head *head)
{
struct perf_event *event;
@@ -1665,6 +1668,7 @@ static void free_event_rcu(struct rcu_head *head)
event = container_of(head, struct perf_event, rcu_head);
if (event->ns)
put_pid_ns(event->ns);
+ perf_event_free_filter(event);
kfree(event);
}
@@ -1974,7 +1978,8 @@ unlock:
return ret;
}
-int perf_event_set_output(struct perf_event *event, int output_fd);
+static int perf_event_set_output(struct perf_event *event, int output_fd);
+static int perf_event_set_filter(struct perf_event *event, void __user *arg);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
@@ -2002,6 +2007,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PERF_EVENT_IOC_SET_OUTPUT:
return perf_event_set_output(event, arg);
+ case PERF_EVENT_IOC_SET_FILTER:
+ return perf_event_set_filter(event, (void __user *)arg);
+
default:
return -ENOTTY;
}
@@ -3806,9 +3814,14 @@ static int perf_swevent_is_counting(struct perf_event *event)
return 1;
}
+static int perf_tp_event_match(struct perf_event *event,
+ struct perf_sample_data *data);
+
static int perf_swevent_match(struct perf_event *event,
enum perf_type_id type,
- u32 event_id, struct pt_regs *regs)
+ u32 event_id,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
{
if (!perf_swevent_is_counting(event))
return 0;
@@ -3826,6 +3839,10 @@ static int perf_swevent_match(struct perf_event *event,
return 0;
}
+ if (event->attr.type == PERF_TYPE_TRACEPOINT &&
+ !perf_tp_event_match(event, data))
+ return 0;
+
return 1;
}
@@ -3842,7 +3859,7 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,
rcu_read_lock();
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
- if (perf_swevent_match(event, type, event_id, regs))
+ if (perf_swevent_match(event, type, event_id, data, regs))
perf_swevent_add(event, nr, nmi, data, regs);
}
rcu_read_unlock();
@@ -4086,6 +4103,7 @@ static const struct pmu perf_ops_task_clock = {
};
#ifdef CONFIG_EVENT_PROFILE
+
void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
int entry_size)
{
@@ -4109,8 +4127,15 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
}
EXPORT_SYMBOL_GPL(perf_tp_event);
-extern int ftrace_profile_enable(int);
-extern void ftrace_profile_disable(int);
+static int perf_tp_event_match(struct perf_event *event,
+ struct perf_sample_data *data)
+{
+ void *record = data->raw->data;
+
+ if (likely(!event->filter) || filter_match_preds(event->filter, record))
+ return 1;
+ return 0;
+}
static void tp_perf_event_destroy(struct perf_event *event)
{
@@ -4135,12 +4160,53 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
return &perf_ops_generic;
}
+
+static int perf_event_set_filter(struct perf_event *event, void __user *arg)
+{
+ char *filter_str;
+ int ret;
+
+ if (event->attr.type != PERF_TYPE_TRACEPOINT)
+ return -EINVAL;
+
+ filter_str = strndup_user(arg, PAGE_SIZE);
+ if (IS_ERR(filter_str))
+ return PTR_ERR(filter_str);
+
+ ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
+
+ kfree(filter_str);
+ return ret;
+}
+
+static void perf_event_free_filter(struct perf_event *event)
+{
+ ftrace_profile_free_filter(event);
+}
+
#else
+
+static int perf_tp_event_match(struct perf_event *event,
+ struct perf_sample_data *data)
+{
+ return 1;
+}
+
static const struct pmu *tp_perf_event_init(struct perf_event *event)
{
return NULL;
}
-#endif
+
+static int perf_event_set_filter(struct perf_event *event, void __user *arg)
+{
+ return -ENOENT;
+}
+
+static void perf_event_free_filter(struct perf_event *event)
+{
+}
+
+#endif /* CONFIG_EVENT_PROFILE */
atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
@@ -4394,7 +4460,7 @@ err_size:
goto out;
}
-int perf_event_set_output(struct perf_event *event, int output_fd)
+static int perf_event_set_output(struct perf_event *event, int output_fd)
{
struct perf_event *output_event = NULL;
struct file *output_file = NULL;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 37ba67e33265..b10c0d90a6ff 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -60,6 +60,13 @@ static int last_ftrace_enabled;
/* Quick disabling of function tracer. */
int function_trace_stop;
+/* List for set_ftrace_pid's pids. */
+LIST_HEAD(ftrace_pids);
+struct ftrace_pid {
+ struct list_head list;
+ struct pid *pid;
+};
+
/*
* ftrace_disabled is set when an anomaly is discovered.
* ftrace_disabled is much stronger than ftrace_enabled.
@@ -78,6 +85,10 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
+#endif
+
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
struct ftrace_ops *op = ftrace_list;
@@ -155,7 +166,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
else
func = ftrace_list_func;
- if (ftrace_pid_trace) {
+ if (!list_empty(&ftrace_pids)) {
set_ftrace_pid_function(func);
func = ftrace_pid_func;
}
@@ -203,7 +214,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
if (ftrace_list->next == &ftrace_list_end) {
ftrace_func_t func = ftrace_list->func;
- if (ftrace_pid_trace) {
+ if (!list_empty(&ftrace_pids)) {
set_ftrace_pid_function(func);
func = ftrace_pid_func;
}
@@ -231,7 +242,7 @@ static void ftrace_update_pid_func(void)
func = __ftrace_trace_function;
#endif
- if (ftrace_pid_trace) {
+ if (!list_empty(&ftrace_pids)) {
set_ftrace_pid_function(func);
func = ftrace_pid_func;
} else {
@@ -821,8 +832,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
}
#endif /* CONFIG_FUNCTION_PROFILER */
-/* set when tracing only a pid */
-struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -1261,12 +1270,34 @@ static int ftrace_update_code(struct module *mod)
ftrace_new_addrs = p->newlist;
p->flags = 0L;
- /* convert record (i.e, patch mcount-call with NOP) */
- if (ftrace_code_disable(mod, p)) {
- p->flags |= FTRACE_FL_CONVERTED;
- ftrace_update_cnt++;
- } else
+ /*
+	 * Do the initial record conversion from mcount jump
+ * to the NOP instructions.
+ */
+ if (!ftrace_code_disable(mod, p)) {
ftrace_free_rec(p);
+ continue;
+ }
+
+ p->flags |= FTRACE_FL_CONVERTED;
+ ftrace_update_cnt++;
+
+ /*
+ * If the tracing is enabled, go ahead and enable the record.
+ *
+	 * The reason not to enable the record immediately is the
+	 * inherent check of ftrace_make_nop/ftrace_make_call for
+	 * correct previous instructions. Doing the NOP conversion
+	 * first puts the module into the correct state, thus
+	 * passing the ftrace_make_call check.
+ */
+ if (ftrace_start_up) {
+ int failed = __ftrace_replace_code(p, 1);
+ if (failed) {
+ ftrace_bug(failed, p->ip);
+ ftrace_free_rec(p);
+ }
+ }
}
stop = ftrace_now(raw_smp_processor_id());
@@ -1656,60 +1687,6 @@ ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
return ret;
}
-enum {
- MATCH_FULL,
- MATCH_FRONT_ONLY,
- MATCH_MIDDLE_ONLY,
- MATCH_END_ONLY,
-};
-
-/*
- * (static function - no need for kernel doc)
- *
- * Pass in a buffer containing a glob and this function will
- * set search to point to the search part of the buffer and
- * return the type of search it is (see enum above).
- * This does modify buff.
- *
- * Returns enum type.
- * search returns the pointer to use for comparison.
- * not returns 1 if buff started with a '!'
- * 0 otherwise.
- */
-static int
-ftrace_setup_glob(char *buff, int len, char **search, int *not)
-{
- int type = MATCH_FULL;
- int i;
-
- if (buff[0] == '!') {
- *not = 1;
- buff++;
- len--;
- } else
- *not = 0;
-
- *search = buff;
-
- for (i = 0; i < len; i++) {
- if (buff[i] == '*') {
- if (!i) {
- *search = buff + 1;
- type = MATCH_END_ONLY;
- } else {
- if (type == MATCH_END_ONLY)
- type = MATCH_MIDDLE_ONLY;
- else
- type = MATCH_FRONT_ONLY;
- buff[i] = 0;
- break;
- }
- }
- }
-
- return type;
-}
-
static int ftrace_match(char *str, char *regex, int len, int type)
{
int matched = 0;
@@ -1758,7 +1735,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
int not;
flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
- type = ftrace_setup_glob(buff, len, &search, &not);
+ type = filter_parse_regex(buff, len, &search, &not);
search_len = strlen(search);
@@ -1826,7 +1803,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
}
if (strlen(buff)) {
- type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
+ type = filter_parse_regex(buff, strlen(buff), &search, &not);
search_len = strlen(search);
}
@@ -1991,7 +1968,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
int count = 0;
char *search;
- type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+ type = filter_parse_regex(glob, strlen(glob), &search, &not);
len = strlen(search);
/* we do not support '!' for function probes */
@@ -2068,7 +2045,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
else if (glob) {
int not;
- type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+ type = filter_parse_regex(glob, strlen(glob), &search, &not);
len = strlen(search);
/* we do not support '!' for function probes */
@@ -2297,6 +2274,7 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset)
#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
+static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static int __init set_ftrace_notrace(char *str)
{
@@ -2312,6 +2290,31 @@ static int __init set_ftrace_filter(char *str)
}
__setup("ftrace_filter=", set_ftrace_filter);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int __init set_graph_function(char *str)
+{
+ strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
+ return 1;
+}
+__setup("ftrace_graph_filter=", set_graph_function);
+
+static void __init set_ftrace_early_graph(char *buf)
+{
+ int ret;
+ char *func;
+
+ while (buf) {
+ func = strsep(&buf, ",");
+ /* we allow only one expression at a time */
+ ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
+ func);
+ if (ret)
+ printk(KERN_DEBUG "ftrace: function %s not "
+ "traceable\n", func);
+ }
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
static void __init set_ftrace_early_filter(char *buf, int enable)
{
char *func;
@@ -2328,6 +2331,10 @@ static void __init set_ftrace_early_filters(void)
set_ftrace_early_filter(ftrace_filter_buf, 1);
if (ftrace_notrace_buf[0])
set_ftrace_early_filter(ftrace_notrace_buf, 0);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if (ftrace_graph_buf[0])
+ set_ftrace_early_graph(ftrace_graph_buf);
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}
static int
@@ -2513,7 +2520,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
return -ENODEV;
/* decode regex */
- type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
+ type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
if (not)
return -EINVAL;
@@ -2624,7 +2631,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
return 0;
}
-static int ftrace_convert_nops(struct module *mod,
+static int ftrace_process_locs(struct module *mod,
unsigned long *start,
unsigned long *end)
{
@@ -2684,7 +2691,7 @@ static void ftrace_init_module(struct module *mod,
{
if (ftrace_disabled || start == end)
return;
- ftrace_convert_nops(mod, start, end);
+ ftrace_process_locs(mod, start, end);
}
static int ftrace_module_notify(struct notifier_block *self,
@@ -2745,7 +2752,7 @@ void __init ftrace_init(void)
last_ftrace_enabled = ftrace_enabled = 1;
- ret = ftrace_convert_nops(NULL,
+ ret = ftrace_process_locs(NULL,
__start_mcount_loc,
__stop_mcount_loc);
@@ -2778,23 +2785,6 @@ static inline void ftrace_startup_enable(int command) { }
# define ftrace_shutdown_sysctl() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
-static ssize_t
-ftrace_pid_read(struct file *file, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- char buf[64];
- int r;
-
- if (ftrace_pid_trace == ftrace_swapper_pid)
- r = sprintf(buf, "swapper tasks\n");
- else if (ftrace_pid_trace)
- r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
- else
- r = sprintf(buf, "no pid\n");
-
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
static void clear_ftrace_swapper(void)
{
struct task_struct *p;
@@ -2845,14 +2835,12 @@ static void set_ftrace_pid(struct pid *pid)
rcu_read_unlock();
}
-static void clear_ftrace_pid_task(struct pid **pid)
+static void clear_ftrace_pid_task(struct pid *pid)
{
- if (*pid == ftrace_swapper_pid)
+ if (pid == ftrace_swapper_pid)
clear_ftrace_swapper();
else
- clear_ftrace_pid(*pid);
-
- *pid = NULL;
+ clear_ftrace_pid(pid);
}
static void set_ftrace_pid_task(struct pid *pid)
@@ -2863,11 +2851,140 @@ static void set_ftrace_pid_task(struct pid *pid)
set_ftrace_pid(pid);
}
+static int ftrace_pid_add(int p)
+{
+ struct pid *pid;
+ struct ftrace_pid *fpid;
+ int ret = -EINVAL;
+
+ mutex_lock(&ftrace_lock);
+
+ if (!p)
+ pid = ftrace_swapper_pid;
+ else
+ pid = find_get_pid(p);
+
+ if (!pid)
+ goto out;
+
+ ret = 0;
+
+ list_for_each_entry(fpid, &ftrace_pids, list)
+ if (fpid->pid == pid)
+ goto out_put;
+
+ ret = -ENOMEM;
+
+ fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
+ if (!fpid)
+ goto out_put;
+
+ list_add(&fpid->list, &ftrace_pids);
+ fpid->pid = pid;
+
+ set_ftrace_pid_task(pid);
+
+ ftrace_update_pid_func();
+ ftrace_startup_enable(0);
+
+ mutex_unlock(&ftrace_lock);
+ return 0;
+
+out_put:
+ if (pid != ftrace_swapper_pid)
+ put_pid(pid);
+
+out:
+ mutex_unlock(&ftrace_lock);
+ return ret;
+}
+
+static void ftrace_pid_reset(void)
+{
+ struct ftrace_pid *fpid, *safe;
+
+ mutex_lock(&ftrace_lock);
+ list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
+ struct pid *pid = fpid->pid;
+
+ clear_ftrace_pid_task(pid);
+
+ list_del(&fpid->list);
+ kfree(fpid);
+ }
+
+ ftrace_update_pid_func();
+ ftrace_startup_enable(0);
+
+ mutex_unlock(&ftrace_lock);
+}
+
+static void *fpid_start(struct seq_file *m, loff_t *pos)
+{
+ mutex_lock(&ftrace_lock);
+
+ if (list_empty(&ftrace_pids) && (!*pos))
+ return (void *) 1;
+
+ return seq_list_start(&ftrace_pids, *pos);
+}
+
+static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ if (v == (void *)1)
+ return NULL;
+
+ return seq_list_next(v, &ftrace_pids, pos);
+}
+
+static void fpid_stop(struct seq_file *m, void *p)
+{
+ mutex_unlock(&ftrace_lock);
+}
+
+static int fpid_show(struct seq_file *m, void *v)
+{
+ const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
+
+ if (v == (void *)1) {
+ seq_printf(m, "no pid\n");
+ return 0;
+ }
+
+ if (fpid->pid == ftrace_swapper_pid)
+ seq_printf(m, "swapper tasks\n");
+ else
+ seq_printf(m, "%u\n", pid_vnr(fpid->pid));
+
+ return 0;
+}
+
+static const struct seq_operations ftrace_pid_sops = {
+ .start = fpid_start,
+ .next = fpid_next,
+ .stop = fpid_stop,
+ .show = fpid_show,
+};
+
+static int
+ftrace_pid_open(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+
+ if ((file->f_mode & FMODE_WRITE) &&
+ (file->f_flags & O_TRUNC))
+ ftrace_pid_reset();
+
+ if (file->f_mode & FMODE_READ)
+ ret = seq_open(file, &ftrace_pid_sops);
+
+ return ret;
+}
+
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- struct pid *pid;
char buf[64];
long val;
int ret;
@@ -2880,57 +2997,38 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
buf[cnt] = 0;
+ /*
+ * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
+ * to clean the filter quietly.
+ */
+ strstrip(buf);
+ if (strlen(buf) == 0)
+ return 1;
+
ret = strict_strtol(buf, 10, &val);
if (ret < 0)
return ret;
- mutex_lock(&ftrace_lock);
- if (val < 0) {
- /* disable pid tracing */
- if (!ftrace_pid_trace)
- goto out;
-
- clear_ftrace_pid_task(&ftrace_pid_trace);
-
- } else {
- /* swapper task is special */
- if (!val) {
- pid = ftrace_swapper_pid;
- if (pid == ftrace_pid_trace)
- goto out;
- } else {
- pid = find_get_pid(val);
+ ret = ftrace_pid_add(val);
- if (pid == ftrace_pid_trace) {
- put_pid(pid);
- goto out;
- }
- }
-
- if (ftrace_pid_trace)
- clear_ftrace_pid_task(&ftrace_pid_trace);
-
- if (!pid)
- goto out;
-
- ftrace_pid_trace = pid;
-
- set_ftrace_pid_task(ftrace_pid_trace);
- }
-
- /* update the function call */
- ftrace_update_pid_func();
- ftrace_startup_enable(0);
+ return ret ? ret : cnt;
+}
- out:
- mutex_unlock(&ftrace_lock);
+static int
+ftrace_pid_release(struct inode *inode, struct file *file)
+{
+ if (file->f_mode & FMODE_READ)
+ seq_release(inode, file);
- return cnt;
+ return 0;
}
static const struct file_operations ftrace_pid_fops = {
- .read = ftrace_pid_read,
- .write = ftrace_pid_write,
+ .open = ftrace_pid_open,
+ .write = ftrace_pid_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = ftrace_pid_release,
};
static __init int ftrace_init_debugfs(void)
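
A minimal user-space sketch of the reworked set_ftrace_pid interface above: each write appends a PID to the list, opening the file with O_TRUNC resets it (ftrace_pid_reset), and a read walks the list through the seq_file operations. The debugfs path and the PIDs used are illustrative, not part of the patch; run as root on a kernel carrying this change.

/* Hypothetical demo: add two PIDs to set_ftrace_pid and dump the list. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

static const char *path = "/sys/kernel/debug/tracing/set_ftrace_pid";

static int add_pid(int pid)
{
    char buf[32];
    int fd = open(path, O_WRONLY);  /* no O_TRUNC: keep existing PIDs */

    if (fd < 0)
        return -1;
    snprintf(buf, sizeof(buf), "%d\n", pid);
    if (write(fd, buf, strlen(buf)) < 0) {
        close(fd);
        return -1;
    }
    close(fd);
    return 0;
}

int main(void)
{
    char line[64];
    FILE *f;

    add_pid(getpid());  /* trace this process ... */
    add_pid(1);         /* ... and a second task, as another list entry */

    f = fopen(path, "r");  /* seq_file read: one PID per line */
    if (!f)
        return 1;
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);
    return 0;
}

Writing just a newline (or an empty string) returns early and leaves the list untouched, per the strstrip() check in ftrace_pid_write above.
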
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d4ff01970547..e43c928356ee 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -397,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s)
int ret;
ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
- "offset:0;\tsize:%u;\n",
- (unsigned int)sizeof(field.time_stamp));
+ "offset:0;\tsize:%u;\tsigned:%u;\n",
+ (unsigned int)sizeof(field.time_stamp),
+ (unsigned int)is_signed_type(u64));
ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
- "offset:%u;\tsize:%u;\n",
+ "offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), commit),
- (unsigned int)sizeof(field.commit));
+ (unsigned int)sizeof(field.commit),
+ (unsigned int)is_signed_type(long));
ret = trace_seq_printf(s, "\tfield: char data;\t"
- "offset:%u;\tsize:%u;\n",
+ "offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), data),
- (unsigned int)BUF_PAGE_SIZE);
+ (unsigned int)BUF_PAGE_SIZE,
+ (unsigned int)is_signed_type(char));
return ret;
}
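
The new signed:%u column relies on an is_signed_type() helper whose definition is not shown in this excerpt; it is presumably a one-line macro along the following lines (a sketch under that assumption, not the literal kernel definition): cast -1 to the type and test whether it compares below zero.

#include <stdio.h>

/* Sketch of the assumed is_signed_type() idea: signed types keep -1,
 * unsigned types wrap to their maximum value, so the comparison yields
 * 1 for signed and 0 for unsigned.
 */
#define is_signed_type(type)    (((type)(-1)) < 0)

int main(void)
{
    printf("char:          %u\n", (unsigned int)is_signed_type(char));
    printf("long:          %u\n", (unsigned int)is_signed_type(long));
    printf("unsigned long: %u\n", (unsigned int)is_signed_type(unsigned long));
    return 0;
}

Note that the answer for plain char is implementation-defined, which is exactly why the page header above reports it explicitly instead of letting userspace guess.
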
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c820b0310a12..026e715a0c7a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf);
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;
-static int __init set_ftrace(char *str)
+static int __init set_cmdline_ftrace(char *str)
{
strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf;
@@ -137,7 +137,7 @@ static int __init set_ftrace(char *str)
ring_buffer_expanded = 1;
return 1;
}
-__setup("ftrace=", set_ftrace);
+__setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 104c1a72418f..b4e4212e66d7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -506,10 +506,6 @@ static inline int ftrace_graph_addr(unsigned long addr)
return 0;
}
#else
-static inline int ftrace_trace_addr(unsigned long addr)
-{
- return 1;
-}
static inline int ftrace_graph_addr(unsigned long addr)
{
return 1;
@@ -523,12 +519,12 @@ print_graph_function(struct trace_iterator *iter)
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-extern struct pid *ftrace_pid_trace;
+extern struct list_head ftrace_pids;
#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
- if (!ftrace_pid_trace)
+ if (list_empty(&ftrace_pids))
return 1;
return test_tsk_trace_trace(task);
@@ -710,7 +706,6 @@ struct event_filter {
int n_preds;
struct filter_pred **preds;
char *filter_string;
- bool no_reset;
};
struct event_subsystem {
@@ -722,22 +717,40 @@ struct event_subsystem {
};
struct filter_pred;
+struct regex;
typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
int val1, int val2);
+typedef int (*regex_match_func)(char *str, struct regex *r, int len);
+
+enum regex_type {
+ MATCH_FULL = 0,
+ MATCH_FRONT_ONLY,
+ MATCH_MIDDLE_ONLY,
+ MATCH_END_ONLY,
+};
+
+struct regex {
+ char pattern[MAX_FILTER_STR_VAL];
+ int len;
+ int field_len;
+ regex_match_func match;
+};
+
struct filter_pred {
- filter_pred_fn_t fn;
- u64 val;
- char str_val[MAX_FILTER_STR_VAL];
- int str_len;
- char *field_name;
- int offset;
- int not;
- int op;
- int pop_n;
+ filter_pred_fn_t fn;
+ u64 val;
+ struct regex regex;
+ char *field_name;
+ int offset;
+ int not;
+ int op;
+ int pop_n;
};
+extern enum regex_type
+filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call,
struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
@@ -753,7 +766,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event)
{
- if (unlikely(call->filter_active) && !filter_match_preds(call, rec)) {
+ if (unlikely(call->filter_active) &&
+ !filter_match_preds(call->filter, rec)) {
ring_buffer_discard_commit(buffer, event);
return 1;
}
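
The struct regex and the four MATCH_* types introduced above map directly onto the simple glob forms the filter accepts: a leading '*' gives an end-of-string match, a trailing '*' a front match, and both together a substring match. A stand-alone sketch of that classification, mirroring (not reusing) the filter_parse_regex() declared above; it omits the leading-'!' negation handled by the *not flag, and the pattern strings are just examples.

#include <stdio.h>
#include <string.h>

enum match_type { FULL, FRONT_ONLY, MIDDLE_ONLY, END_ONLY };

static const char *names[] = { "FULL", "FRONT_ONLY", "MIDDLE_ONLY", "END_ONLY" };

/* Strip the '*' characters and report which comparison the remaining
 * text should use, the same way the filter core classifies a glob.
 */
static enum match_type classify(char *pat, char **search)
{
    enum match_type type = FULL;
    int len = strlen(pat);
    int i;

    *search = pat;
    for (i = 0; i < len; i++) {
        if (pat[i] != '*')
            continue;
        if (i == 0) {
            *search = pat + 1;
            type = END_ONLY;
        } else {
            type = (type == END_ONLY) ? MIDDLE_ONLY : FRONT_ONLY;
            pat[i] = '\0';
            break;
        }
    }
    return type;
}

int main(void)
{
    char pats[4][16] = { "sys_read", "sys_*", "*_read", "*read*" };
    int i;

    for (i = 0; i < 4; i++) {
        char orig[16], *search;
        enum match_type t;

        strcpy(orig, pats[i]);
        t = classify(pats[i], &search);
        printf("%-8s -> %-12s compare against \"%s\"\n",
               orig, names[t], search);
    }
    return 0;
}
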
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f2f5064701e5..1d18315dc836 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -503,7 +503,7 @@ extern char *__bad_type_size(void);
#define FIELD(type, name) \
sizeof(type) != sizeof(field.name) ? __bad_type_size() : \
#type, "common_" #name, offsetof(typeof(field), name), \
- sizeof(field.name)
+ sizeof(field.name), is_signed_type(type)
static int trace_write_header(struct trace_seq *s)
{
@@ -511,17 +511,17 @@ static int trace_write_header(struct trace_seq *s)
/* struct trace_entry */
return trace_seq_printf(s,
- "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
- "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
- "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
- "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
- "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
- "\n",
- FIELD(unsigned short, type),
- FIELD(unsigned char, flags),
- FIELD(unsigned char, preempt_count),
- FIELD(int, pid),
- FIELD(int, lock_depth));
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+ "\n",
+ FIELD(unsigned short, type),
+ FIELD(unsigned char, flags),
+ FIELD(unsigned char, preempt_count),
+ FIELD(int, pid),
+ FIELD(int, lock_depth));
}
static ssize_t
@@ -874,9 +874,9 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
"'%s/filter' entry\n", name);
}
- entry = trace_create_file("enable", 0644, system->entry,
- (void *)system->name,
- &ftrace_system_enable_fops);
+ trace_create_file("enable", 0644, system->entry,
+ (void *)system->name,
+ &ftrace_system_enable_fops);
return system->entry;
}
@@ -888,7 +888,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
const struct file_operations *filter,
const struct file_operations *format)
{
- struct dentry *entry;
int ret;
/*
@@ -906,12 +905,12 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
}
if (call->regfunc)
- entry = trace_create_file("enable", 0644, call->dir, call,
- enable);
+ trace_create_file("enable", 0644, call->dir, call,
+ enable);
if (call->id && call->profile_enable)
- entry = trace_create_file("id", 0444, call->dir, call,
- id);
+ trace_create_file("id", 0444, call->dir, call,
+ id);
if (call->define_fields) {
ret = call->define_fields(call);
@@ -920,16 +919,16 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
" events/%s\n", call->name);
return ret;
}
- entry = trace_create_file("filter", 0644, call->dir, call,
- filter);
+ trace_create_file("filter", 0644, call->dir, call,
+ filter);
}
/* A trace may not want to export its format */
if (!call->show_format)
return 0;
- entry = trace_create_file("format", 0444, call->dir, call,
- format);
+ trace_create_file("format", 0444, call->dir, call,
+ format);
return 0;
}
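
For reference, with the change above each line in an event's format file gains the new attribute, so userspace (perf in particular) can tell signed from unsigned fields without guessing from the type name. An illustrative excerpt, with offsets and sizes made up for the example:

    field:unsigned short common_type;  offset:0;  size:2;  signed:0;
    field:int common_pid;              offset:8;  size:4;  signed:1;
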
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 98a6cc5c64ed..21d34757b955 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -18,11 +18,10 @@
* Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
*/
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
+#include <linux/perf_event.h>
#include "trace.h"
#include "trace_output.h"
@@ -31,6 +30,7 @@ enum filter_op_ids
{
OP_OR,
OP_AND,
+ OP_GLOB,
OP_NE,
OP_EQ,
OP_LT,
@@ -48,16 +48,17 @@ struct filter_op {
};
static struct filter_op filter_ops[] = {
- { OP_OR, "||", 1 },
- { OP_AND, "&&", 2 },
- { OP_NE, "!=", 4 },
- { OP_EQ, "==", 4 },
- { OP_LT, "<", 5 },
- { OP_LE, "<=", 5 },
- { OP_GT, ">", 5 },
- { OP_GE, ">=", 5 },
- { OP_NONE, "OP_NONE", 0 },
- { OP_OPEN_PAREN, "(", 0 },
+ { OP_OR, "||", 1 },
+ { OP_AND, "&&", 2 },
+ { OP_GLOB, "~", 4 },
+ { OP_NE, "!=", 4 },
+ { OP_EQ, "==", 4 },
+ { OP_LT, "<", 5 },
+ { OP_LE, "<=", 5 },
+ { OP_GT, ">", 5 },
+ { OP_GE, ">=", 5 },
+ { OP_NONE, "OP_NONE", 0 },
+ { OP_OPEN_PAREN, "(", 0 },
};
enum {
@@ -197,9 +198,9 @@ static int filter_pred_string(struct filter_pred *pred, void *event,
char *addr = (char *)(event + pred->offset);
int cmp, match;
- cmp = strncmp(addr, pred->str_val, pred->str_len);
+ cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
- match = (!cmp) ^ pred->not;
+ match = cmp ^ pred->not;
return match;
}
@@ -211,9 +212,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event,
char **addr = (char **)(event + pred->offset);
int cmp, match;
- cmp = strncmp(*addr, pred->str_val, pred->str_len);
+ cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len);
- match = (!cmp) ^ pred->not;
+ match = cmp ^ pred->not;
return match;
}
@@ -237,9 +238,9 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event,
char *addr = (char *)(event + str_loc);
int cmp, match;
- cmp = strncmp(addr, pred->str_val, str_len);
+ cmp = pred->regex.match(addr, &pred->regex, str_len);
- match = (!cmp) ^ pred->not;
+ match = cmp ^ pred->not;
return match;
}
@@ -250,10 +251,121 @@ static int filter_pred_none(struct filter_pred *pred, void *event,
return 0;
}
+/* Basic regex callbacks */
+static int regex_match_full(char *str, struct regex *r, int len)
+{
+ if (strncmp(str, r->pattern, len) == 0)
+ return 1;
+ return 0;
+}
+
+static int regex_match_front(char *str, struct regex *r, int len)
+{
+ if (strncmp(str, r->pattern, len) == 0)
+ return 1;
+ return 0;
+}
+
+static int regex_match_middle(char *str, struct regex *r, int len)
+{
+ if (strstr(str, r->pattern))
+ return 1;
+ return 0;
+}
+
+static int regex_match_end(char *str, struct regex *r, int len)
+{
+ char *ptr = strstr(str, r->pattern);
+
+ if (ptr && (ptr[r->len] == 0))
+ return 1;
+ return 0;
+}
+
+/**
+ * filter_parse_regex - parse a basic regex
+ * @buff: the raw regex
+ * @len: length of the regex
+ * @search: will point to the beginning of the string to compare
+ * @not: tell whether the match will have to be inverted
+ *
+ * This passes in a buffer containing a regex and this function will
+ * set search to point to the search part of the buffer and
+ * return the type of search it is (see enum above).
+ * This does modify buff.
+ *
+ * Returns enum type.
+ * search returns the pointer to use for comparison.
+ * not returns 1 if buff started with a '!'
+ * 0 otherwise.
+ */
+enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
+{
+ int type = MATCH_FULL;
+ int i;
+
+ if (buff[0] == '!') {
+ *not = 1;
+ buff++;
+ len--;
+ } else
+ *not = 0;
+
+ *search = buff;
+
+ for (i = 0; i < len; i++) {
+ if (buff[i] == '*') {
+ if (!i) {
+ *search = buff + 1;
+ type = MATCH_END_ONLY;
+ } else {
+ if (type == MATCH_END_ONLY)
+ type = MATCH_MIDDLE_ONLY;
+ else
+ type = MATCH_FRONT_ONLY;
+ buff[i] = 0;
+ break;
+ }
+ }
+ }
+
+ return type;
+}
+
+static void filter_build_regex(struct filter_pred *pred)
+{
+ struct regex *r = &pred->regex;
+ char *search;
+ enum regex_type type = MATCH_FULL;
+ int not = 0;
+
+ if (pred->op == OP_GLOB) {
+ type = filter_parse_regex(r->pattern, r->len, &search, &not);
+ r->len = strlen(search);
+ memmove(r->pattern, search, r->len+1);
+ }
+
+ switch (type) {
+ case MATCH_FULL:
+ r->match = regex_match_full;
+ break;
+ case MATCH_FRONT_ONLY:
+ r->match = regex_match_front;
+ break;
+ case MATCH_MIDDLE_ONLY:
+ r->match = regex_match_middle;
+ break;
+ case MATCH_END_ONLY:
+ r->match = regex_match_end;
+ break;
+ }
+
+ pred->not ^= not;
+}
+
/* return 1 if event matches, 0 otherwise (discard) */
-int filter_match_preds(struct ftrace_event_call *call, void *rec)
+int filter_match_preds(struct event_filter *filter, void *rec)
{
- struct event_filter *filter = call->filter;
int match, top = 0, val1 = 0, val2 = 0;
int stack[MAX_FILTER_PRED];
struct filter_pred *pred;
@@ -396,7 +508,7 @@ static void filter_clear_pred(struct filter_pred *pred)
{
kfree(pred->field_name);
pred->field_name = NULL;
- pred->str_len = 0;
+ pred->regex.len = 0;
}
static int filter_set_pred(struct filter_pred *dest,
@@ -426,9 +538,8 @@ static void filter_disable_preds(struct ftrace_event_call *call)
filter->preds[i]->fn = filter_pred_none;
}
-void destroy_preds(struct ftrace_event_call *call)
+static void __free_preds(struct event_filter *filter)
{
- struct event_filter *filter = call->filter;
int i;
if (!filter)
@@ -441,21 +552,24 @@ void destroy_preds(struct ftrace_event_call *call)
kfree(filter->preds);
kfree(filter->filter_string);
kfree(filter);
+}
+
+void destroy_preds(struct ftrace_event_call *call)
+{
+ __free_preds(call->filter);
call->filter = NULL;
+ call->filter_active = 0;
}
-static int init_preds(struct ftrace_event_call *call)
+static struct event_filter *__alloc_preds(void)
{
struct event_filter *filter;
struct filter_pred *pred;
int i;
- if (call->filter)
- return 0;
-
- filter = call->filter = kzalloc(sizeof(*filter), GFP_KERNEL);
- if (!call->filter)
- return -ENOMEM;
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return ERR_PTR(-ENOMEM);
filter->n_preds = 0;
@@ -471,12 +585,24 @@ static int init_preds(struct ftrace_event_call *call)
filter->preds[i] = pred;
}
- return 0;
+ return filter;
oom:
- destroy_preds(call);
+ __free_preds(filter);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int init_preds(struct ftrace_event_call *call)
+{
+ if (call->filter)
+ return 0;
- return -ENOMEM;
+ call->filter_active = 0;
+ call->filter = __alloc_preds();
+ if (IS_ERR(call->filter))
+ return PTR_ERR(call->filter);
+
+ return 0;
}
static int init_subsystem_preds(struct event_subsystem *system)
@@ -499,14 +625,7 @@ static int init_subsystem_preds(struct event_subsystem *system)
return 0;
}
-enum {
- FILTER_DISABLE_ALL,
- FILTER_INIT_NO_RESET,
- FILTER_SKIP_NO_RESET,
-};
-
-static void filter_free_subsystem_preds(struct event_subsystem *system,
- int flag)
+static void filter_free_subsystem_preds(struct event_subsystem *system)
{
struct ftrace_event_call *call;
@@ -517,14 +636,6 @@ static void filter_free_subsystem_preds(struct event_subsystem *system,
if (strcmp(call->system, system->name) != 0)
continue;
- if (flag == FILTER_INIT_NO_RESET) {
- call->filter->no_reset = false;
- continue;
- }
-
- if (flag == FILTER_SKIP_NO_RESET && call->filter->no_reset)
- continue;
-
filter_disable_preds(call);
remove_filter_string(call->filter);
}
@@ -532,10 +643,10 @@ static void filter_free_subsystem_preds(struct event_subsystem *system,
static int filter_add_pred_fn(struct filter_parse_state *ps,
struct ftrace_event_call *call,
+ struct event_filter *filter,
struct filter_pred *pred,
filter_pred_fn_t fn)
{
- struct event_filter *filter = call->filter;
int idx, err;
if (filter->n_preds == MAX_FILTER_PRED) {
@@ -550,7 +661,6 @@ static int filter_add_pred_fn(struct filter_parse_state *ps,
return err;
filter->n_preds++;
- call->filter_active = 1;
return 0;
}
@@ -575,7 +685,10 @@ static bool is_string_field(struct ftrace_event_field *field)
static int is_legal_op(struct ftrace_event_field *field, int op)
{
- if (is_string_field(field) && (op != OP_EQ && op != OP_NE))
+ if (is_string_field(field) &&
+ (op != OP_EQ && op != OP_NE && op != OP_GLOB))
+ return 0;
+ if (!is_string_field(field) && op == OP_GLOB)
return 0;
return 1;
@@ -626,6 +739,7 @@ static filter_pred_fn_t select_comparison_fn(int op, int field_size,
static int filter_add_pred(struct filter_parse_state *ps,
struct ftrace_event_call *call,
+ struct event_filter *filter,
struct filter_pred *pred,
bool dry_run)
{
@@ -660,21 +774,22 @@ static int filter_add_pred(struct filter_parse_state *ps,
}
if (is_string_field(field)) {
- pred->str_len = field->size;
+ filter_build_regex(pred);
- if (field->filter_type == FILTER_STATIC_STRING)
+ if (field->filter_type == FILTER_STATIC_STRING) {
fn = filter_pred_string;
- else if (field->filter_type == FILTER_DYN_STRING)
+ pred->regex.field_len = field->size;
+ } else if (field->filter_type == FILTER_DYN_STRING)
fn = filter_pred_strloc;
else {
fn = filter_pred_pchar;
- pred->str_len = strlen(pred->str_val);
+ pred->regex.field_len = strlen(pred->regex.pattern);
}
} else {
if (field->is_signed)
- ret = strict_strtoll(pred->str_val, 0, &val);
+ ret = strict_strtoll(pred->regex.pattern, 0, &val);
else
- ret = strict_strtoull(pred->str_val, 0, &val);
+ ret = strict_strtoull(pred->regex.pattern, 0, &val);
if (ret) {
parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
return -EINVAL;
@@ -694,45 +809,7 @@ static int filter_add_pred(struct filter_parse_state *ps,
add_pred_fn:
if (!dry_run)
- return filter_add_pred_fn(ps, call, pred, fn);
- return 0;
-}
-
-static int filter_add_subsystem_pred(struct filter_parse_state *ps,
- struct event_subsystem *system,
- struct filter_pred *pred,
- char *filter_string,
- bool dry_run)
-{
- struct ftrace_event_call *call;
- int err = 0;
- bool fail = true;
-
- list_for_each_entry(call, &ftrace_events, list) {
-
- if (!call->define_fields)
- continue;
-
- if (strcmp(call->system, system->name))
- continue;
-
- if (call->filter->no_reset)
- continue;
-
- err = filter_add_pred(ps, call, pred, dry_run);
- if (err)
- call->filter->no_reset = true;
- else
- fail = false;
-
- if (!dry_run)
- replace_filter_string(call->filter, filter_string);
- }
-
- if (fail) {
- parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
- return err;
- }
+ return filter_add_pred_fn(ps, call, filter, pred, fn);
return 0;
}
@@ -1045,8 +1122,8 @@ static struct filter_pred *create_pred(int op, char *operand1, char *operand2)
return NULL;
}
- strcpy(pred->str_val, operand2);
- pred->str_len = strlen(operand2);
+ strcpy(pred->regex.pattern, operand2);
+ pred->regex.len = strlen(pred->regex.pattern);
pred->op = op;
@@ -1090,8 +1167,8 @@ static int check_preds(struct filter_parse_state *ps)
return 0;
}
-static int replace_preds(struct event_subsystem *system,
- struct ftrace_event_call *call,
+static int replace_preds(struct ftrace_event_call *call,
+ struct event_filter *filter,
struct filter_parse_state *ps,
char *filter_string,
bool dry_run)
@@ -1138,11 +1215,7 @@ static int replace_preds(struct event_subsystem *system,
add_pred:
if (!pred)
return -ENOMEM;
- if (call)
- err = filter_add_pred(ps, call, pred, false);
- else
- err = filter_add_subsystem_pred(ps, system, pred,
- filter_string, dry_run);
+ err = filter_add_pred(ps, call, filter, pred, dry_run);
filter_free_pred(pred);
if (err)
return err;
@@ -1153,10 +1226,50 @@ add_pred:
return 0;
}
-int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
+static int replace_system_preds(struct event_subsystem *system,
+ struct filter_parse_state *ps,
+ char *filter_string)
{
+ struct event_filter *filter = system->filter;
+ struct ftrace_event_call *call;
+ bool fail = true;
int err;
+ list_for_each_entry(call, &ftrace_events, list) {
+
+ if (!call->define_fields)
+ continue;
+
+ if (strcmp(call->system, system->name) != 0)
+ continue;
+
+ /* try to see if the filter can be applied */
+ err = replace_preds(call, filter, ps, filter_string, true);
+ if (err)
+ continue;
+
+ /* really apply the filter */
+ filter_disable_preds(call);
+ err = replace_preds(call, filter, ps, filter_string, false);
+ if (err)
+ filter_disable_preds(call);
+ else {
+ call->filter_active = 1;
+ replace_filter_string(filter, filter_string);
+ }
+ fail = false;
+ }
+
+ if (fail) {
+ parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
+{
+ int err;
struct filter_parse_state *ps;
mutex_lock(&event_mutex);
@@ -1168,8 +1281,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
if (!strcmp(strstrip(filter_string), "0")) {
filter_disable_preds(call);
remove_filter_string(call->filter);
- mutex_unlock(&event_mutex);
- return 0;
+ goto out_unlock;
}
err = -ENOMEM;
@@ -1187,10 +1299,11 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
goto out;
}
- err = replace_preds(NULL, call, ps, filter_string, false);
+ err = replace_preds(call, call->filter, ps, filter_string, false);
if (err)
append_filter_err(ps, call->filter);
-
+ else
+ call->filter_active = 1;
out:
filter_opstack_clear(ps);
postfix_clear(ps);
@@ -1205,7 +1318,6 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
char *filter_string)
{
int err;
-
struct filter_parse_state *ps;
mutex_lock(&event_mutex);
@@ -1215,10 +1327,9 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
goto out_unlock;
if (!strcmp(strstrip(filter_string), "0")) {
- filter_free_subsystem_preds(system, FILTER_DISABLE_ALL);
+ filter_free_subsystem_preds(system);
remove_filter_string(system->filter);
- mutex_unlock(&event_mutex);
- return 0;
+ goto out_unlock;
}
err = -ENOMEM;
@@ -1235,31 +1346,87 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
goto out;
}
- filter_free_subsystem_preds(system, FILTER_INIT_NO_RESET);
-
- /* try to see the filter can be applied to which events */
- err = replace_preds(system, NULL, ps, filter_string, true);
- if (err) {
+ err = replace_system_preds(system, ps, filter_string);
+ if (err)
append_filter_err(ps, system->filter);
- goto out;
+
+out:
+ filter_opstack_clear(ps);
+ postfix_clear(ps);
+ kfree(ps);
+out_unlock:
+ mutex_unlock(&event_mutex);
+
+ return err;
+}
+
+#ifdef CONFIG_EVENT_PROFILE
+
+void ftrace_profile_free_filter(struct perf_event *event)
+{
+ struct event_filter *filter = event->filter;
+
+ event->filter = NULL;
+ __free_preds(filter);
+}
+
+int ftrace_profile_set_filter(struct perf_event *event, int event_id,
+ char *filter_str)
+{
+ int err;
+ struct event_filter *filter;
+ struct filter_parse_state *ps;
+ struct ftrace_event_call *call = NULL;
+
+ mutex_lock(&event_mutex);
+
+ list_for_each_entry(call, &ftrace_events, list) {
+ if (call->id == event_id)
+ break;
}
- filter_free_subsystem_preds(system, FILTER_SKIP_NO_RESET);
+ err = -EINVAL;
+ if (!call)
+ goto out_unlock;
- /* really apply the filter to the events */
- err = replace_preds(system, NULL, ps, filter_string, false);
- if (err) {
- append_filter_err(ps, system->filter);
- filter_free_subsystem_preds(system, 2);
+ err = -EEXIST;
+ if (event->filter)
+ goto out_unlock;
+
+ filter = __alloc_preds();
+ if (IS_ERR(filter)) {
+ err = PTR_ERR(filter);
+ goto out_unlock;
}
-out:
+ err = -ENOMEM;
+ ps = kzalloc(sizeof(*ps), GFP_KERNEL);
+ if (!ps)
+ goto free_preds;
+
+ parse_init(ps, filter_ops, filter_str);
+ err = filter_parse(ps);
+ if (err)
+ goto free_ps;
+
+ err = replace_preds(call, filter, ps, filter_str, false);
+ if (!err)
+ event->filter = filter;
+
+free_ps:
filter_opstack_clear(ps);
postfix_clear(ps);
kfree(ps);
+
+free_preds:
+ if (err)
+ __free_preds(filter);
+
out_unlock:
mutex_unlock(&event_mutex);
return err;
}
+#endif /* CONFIG_EVENT_PROFILE */
+
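
A hypothetical usage of the new '~' operator added above: attach a glob filter to a string field of a trace event. The event, field name and mount point below are illustrative only; the expression is rejected with -EINVAL if, for instance, '~' is applied to a non-string field, per is_legal_op() above.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
    const char *filter_file =
        "/sys/kernel/debug/tracing/events/irq/irq_handler_entry/filter";
    const char *expr = "name ~ \"eth*\"\n";  /* front-match glob */
    int fd = open(filter_file, O_WRONLY);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (write(fd, expr, strlen(expr)) < 0)
        perror("write");  /* e.g. EINVAL if the expression is rejected */
    close(fd);
    return 0;
}

Writing the single character "0" to the same file clears the filter again, as handled by the strstrip() check in apply_event_filter() above.
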
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index ed7d48083520..934d81fb4ca4 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -66,44 +66,47 @@ static void __used ____ftrace_check_##name(void) \
#undef __field
#define __field(type, item) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
- "offset:%zu;\tsize:%zu;\n", \
+ "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
offsetof(typeof(field), item), \
- sizeof(field.item)); \
+ sizeof(field.item), is_signed_type(type)); \
if (!ret) \
return 0;
#undef __field_desc
#define __field_desc(type, container, item) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
- "offset:%zu;\tsize:%zu;\n", \
+ "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
offsetof(typeof(field), container.item), \
- sizeof(field.container.item)); \
+ sizeof(field.container.item), \
+ is_signed_type(type)); \
if (!ret) \
return 0;
#undef __array
#define __array(type, item, len) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
- "offset:%zu;\tsize:%zu;\n", \
- offsetof(typeof(field), item), \
- sizeof(field.item)); \
+ "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
+ offsetof(typeof(field), item), \
+ sizeof(field.item), is_signed_type(type)); \
if (!ret) \
return 0;
#undef __array_desc
#define __array_desc(type, container, item, len) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
- "offset:%zu;\tsize:%zu;\n", \
+ "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
offsetof(typeof(field), container.item), \
- sizeof(field.container.item)); \
+ sizeof(field.container.item), \
+ is_signed_type(type)); \
if (!ret) \
return 0;
#undef __dynamic_array
#define __dynamic_array(type, item) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
- "offset:%zu;\tsize:0;\n", \
- offsetof(typeof(field), item)); \
+ "offset:%zu;\tsize:0;\tsigned:%u;\n", \
+ offsetof(typeof(field), item), \
+ is_signed_type(type)); \
if (!ret) \
return 0;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 9ade66389d5a..58b8e5370767 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -14,6 +14,69 @@ static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
+extern unsigned long __start_syscalls_metadata[];
+extern unsigned long __stop_syscalls_metadata[];
+
+static struct syscall_metadata **syscalls_metadata;
+
+static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
+{
+ struct syscall_metadata *start;
+ struct syscall_metadata *stop;
+ char str[KSYM_SYMBOL_LEN];
+
+
+ start = (struct syscall_metadata *)__start_syscalls_metadata;
+ stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+ kallsyms_lookup(syscall, NULL, NULL, NULL, str);
+
+ for ( ; start < stop; start++) {
+ /*
+ * Only compare after the "sys" prefix. Archs that use
+ * syscall wrappers may have syscalls symbols aliases prefixed
+ * with "SyS" instead of "sys", leading to an unwanted
+ * mismatch.
+ */
+ if (start->name && !strcmp(start->name + 3, str + 3))
+ return start;
+ }
+ return NULL;
+}
+
+static struct syscall_metadata *syscall_nr_to_meta(int nr)
+{
+ if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
+ return NULL;
+
+ return syscalls_metadata[nr];
+}
+
+int syscall_name_to_nr(char *name)
+{
+ int i;
+
+ if (!syscalls_metadata)
+ return -1;
+
+ for (i = 0; i < NR_syscalls; i++) {
+ if (syscalls_metadata[i]) {
+ if (!strcmp(syscalls_metadata[i]->name, name))
+ return i;
+ }
+ }
+ return -1;
+}
+
+void set_syscall_enter_id(int num, int id)
+{
+ syscalls_metadata[num]->enter_id = id;
+}
+
+void set_syscall_exit_id(int num, int id)
+{
+ syscalls_metadata[num]->exit_id = id;
+}
+
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
@@ -103,7 +166,8 @@ extern char *__bad_type_size(void);
#define SYSCALL_FIELD(type, name) \
sizeof(type) != sizeof(trace.name) ? \
__bad_type_size() : \
- #type, #name, offsetof(typeof(trace), name), sizeof(trace.name)
+ #type, #name, offsetof(typeof(trace), name), \
+ sizeof(trace.name), is_signed_type(type)
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
@@ -120,7 +184,8 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
if (!entry)
return 0;
- ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
+ ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
+ "\tsigned:%u;\n",
SYSCALL_FIELD(int, nr));
if (!ret)
return 0;
@@ -130,8 +195,10 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
entry->args[i]);
if (!ret)
return 0;
- ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;\n", offset,
- sizeof(unsigned long));
+ ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
+ "\tsigned:%u;\n", offset,
+ sizeof(unsigned long),
+ is_signed_type(unsigned long));
if (!ret)
return 0;
offset += sizeof(unsigned long);
@@ -163,8 +230,10 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
struct syscall_trace_exit trace;
ret = trace_seq_printf(s,
- "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
- "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
+ "\tsigned:%u;\n"
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
+ "\tsigned:%u;\n",
SYSCALL_FIELD(int, nr),
SYSCALL_FIELD(long, ret));
if (!ret)
@@ -212,7 +281,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
if (ret)
return ret;
- ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0,
+ ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
FILTER_OTHER);
return ret;
@@ -375,6 +444,29 @@ struct trace_event event_syscall_exit = {
.trace = print_syscall_exit,
};
+int __init init_ftrace_syscalls(void)
+{
+ struct syscall_metadata *meta;
+ unsigned long addr;
+ int i;
+
+ syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
+ NR_syscalls, GFP_KERNEL);
+ if (!syscalls_metadata) {
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < NR_syscalls; i++) {
+ addr = arch_syscall_addr(i);
+ meta = find_syscall_meta(addr);
+ syscalls_metadata[i] = meta;
+ }
+
+ return 0;
+}
+core_initcall(init_ftrace_syscalls);
+
#ifdef CONFIG_EVENT_PROFILE
static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
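
The find_syscall_meta() match above deliberately skips the first three characters so that wrapped syscall symbols ("SyS_read") still pair up with their metadata entry ("sys_read"). A tiny stand-alone illustration of that comparison; the symbol names are just examples.

#include <stdio.h>
#include <string.h>

/* Compare two syscall symbol names while ignoring the "sys"/"SyS" prefix,
 * the same trick used by find_syscall_meta() above.
 */
static int syscall_name_match(const char *meta_name, const char *symbol)
{
    return strcmp(meta_name + 3, symbol + 3) == 0;
}

int main(void)
{
    printf("%d\n", syscall_name_match("sys_read", "SyS_read"));  /* 1 */
    printf("%d\n", syscall_name_match("sys_read", "sys_write")); /* 0 */
    return 0;
}
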
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 39f1029e3525..4ebfa5a164d7 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -5,10 +5,13 @@
* relegated to obsolescence, but used by various less
* important (or lazy) subsystems.
*/
-#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
+#include <linux/smp_lock.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/bkl.h>
/*
* The 'big kernel lock'
@@ -113,21 +116,26 @@ static inline void __unlock_kernel(void)
* This cannot happen asynchronously, so we only need to
* worry about other CPU's.
*/
-void __lockfunc lock_kernel(void)
+void __lockfunc _lock_kernel(const char *func, const char *file, int line)
{
- int depth = current->lock_depth+1;
+ int depth = current->lock_depth + 1;
+
+ trace_lock_kernel(func, file, line);
+
if (likely(!depth))
__lock_kernel();
current->lock_depth = depth;
}
-void __lockfunc unlock_kernel(void)
+void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
{
BUG_ON(current->lock_depth < 0);
if (likely(--current->lock_depth < 0))
__unlock_kernel();
+
+ trace_unlock_kernel(func, file, line);
}
-EXPORT_SYMBOL(lock_kernel);
-EXPORT_SYMBOL(unlock_kernel);
+EXPORT_SYMBOL(_lock_kernel);
+EXPORT_SYMBOL(_unlock_kernel);
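
The renamed _lock_kernel()/_unlock_kernel() now take the caller's function, file and line so the new bkl tracepoints can report who took the big kernel lock. Existing call sites keep compiling because smp_lock.h (changed in this series but not shown in this excerpt) presumably wraps them along these lines; this is a sketch of the assumed wrapper, not the exact header contents.

/* Assumed shape of the compatibility wrappers in <linux/smp_lock.h>:
 * callers still write lock_kernel()/unlock_kernel(), while the underscore
 * variants receive the call-site information for the trace events.
 */
#define lock_kernel()    _lock_kernel(__func__, __FILE__, __LINE__)
#define unlock_kernel()  _unlock_kernel(__func__, __FILE__, __LINE__)
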
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 090d300d7394..bfb8b2cdd92a 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -119,6 +119,7 @@ my %text_sections = (
".sched.text" => 1,
".spinlock.text" => 1,
".irqentry.text" => 1,
+ ".text.unlikely" => 1,
);
$objdump = "objdump" if ((length $objdump) == 0);
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index a7910099d6fd..4b1788355eca 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -31,9 +31,12 @@ OPTIONS
-w::
--width=::
Select the width of the SVG file (default: 1000)
--p::
+-P::
--power-only::
Only output the CPU power section of the diagram
+-p::
+--process::
+ Select the processes to display, by name or PID
SEE ALSO
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 1811a7015f9c..147e3cf035d3 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -201,7 +201,14 @@ EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes
EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement
-CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS)
+ifeq ("$(origin DEBUG)", "command line")
+ PERF_DEBUG = $(DEBUG)
+endif
+ifndef PERF_DEBUG
+ CFLAGS_OPTIMIZE = -O6
+endif
+
+CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS)
LDFLAGS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS)
ALL_LDFLAGS = $(LDFLAGS)
@@ -329,8 +336,26 @@ LIB_H += ../../include/linux/perf_event.h
LIB_H += ../../include/linux/rbtree.h
LIB_H += ../../include/linux/list.h
LIB_H += ../../include/linux/stringify.h
+LIB_H += util/include/linux/bitmap.h
+LIB_H += util/include/linux/bitops.h
+LIB_H += util/include/linux/compiler.h
+LIB_H += util/include/linux/ctype.h
+LIB_H += util/include/linux/kernel.h
LIB_H += util/include/linux/list.h
+LIB_H += util/include/linux/module.h
+LIB_H += util/include/linux/poison.h
+LIB_H += util/include/linux/prefetch.h
+LIB_H += util/include/linux/rbtree.h
+LIB_H += util/include/linux/string.h
+LIB_H += util/include/linux/types.h
+LIB_H += util/include/asm/asm-offsets.h
+LIB_H += util/include/asm/bitops.h
+LIB_H += util/include/asm/byteorder.h
+LIB_H += util/include/asm/swab.h
+LIB_H += util/include/asm/system.h
+LIB_H += util/include/asm/uaccess.h
LIB_H += perf.h
+LIB_H += util/event.h
LIB_H += util/types.h
LIB_H += util/levenshtein.h
LIB_H += util/parse-options.h
@@ -344,9 +369,12 @@ LIB_H += util/strlist.h
LIB_H += util/run-command.h
LIB_H += util/sigchain.h
LIB_H += util/symbol.h
-LIB_H += util/module.h
LIB_H += util/color.h
LIB_H += util/values.h
+LIB_H += util/sort.h
+LIB_H += util/hist.h
+LIB_H += util/thread.h
+LIB_H += util/data_map.h
LIB_OBJS += util/abspath.o
LIB_OBJS += util/alias.o
@@ -360,6 +388,9 @@ LIB_OBJS += util/parse-options.o
LIB_OBJS += util/parse-events.o
LIB_OBJS += util/path.o
LIB_OBJS += util/rbtree.o
+LIB_OBJS += util/bitmap.o
+LIB_OBJS += util/hweight.o
+LIB_OBJS += util/find_next_bit.o
LIB_OBJS += util/run-command.o
LIB_OBJS += util/quote.o
LIB_OBJS += util/strbuf.o
@@ -369,7 +400,6 @@ LIB_OBJS += util/usage.o
LIB_OBJS += util/wrapper.o
LIB_OBJS += util/sigchain.o
LIB_OBJS += util/symbol.o
-LIB_OBJS += util/module.o
LIB_OBJS += util/color.o
LIB_OBJS += util/pager.o
LIB_OBJS += util/header.o
@@ -382,6 +412,9 @@ LIB_OBJS += util/trace-event-parse.o
LIB_OBJS += util/trace-event-read.o
LIB_OBJS += util/trace-event-info.o
LIB_OBJS += util/svghelper.o
+LIB_OBJS += util/sort.o
+LIB_OBJS += util/hist.o
+LIB_OBJS += util/data_map.o
BUILTIN_OBJS += builtin-annotate.o
BUILTIN_OBJS += builtin-help.o
@@ -424,8 +457,12 @@ ifeq ($(uname_S),Darwin)
PTHREAD_LIBS =
endif
+ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y)
+ msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]);
+endif
+
ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y)
- msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
+ msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel);
endif
ifneq ($(shell sh -c "(echo '\#include <libdwarf/dwarf.h>'; echo '\#include <libdwarf/libdwarf.h>'; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y)
@@ -795,6 +832,19 @@ util/config.o: util/config.c PERF-CFLAGS
util/rbtree.o: ../../lib/rbtree.c PERF-CFLAGS
$(QUIET_CC)$(CC) -o util/rbtree.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+# some perf warning policies can't fit to lib/bitmap.c, eg: it warns about variable shadowing
+# from <string.h> that comes from kernel headers wrapping.
+KBITMAP_FLAGS=`echo $(ALL_CFLAGS) | sed s/-Wshadow// | sed s/-Wswitch-default// | sed s/-Wextra//`
+
+util/bitmap.o: ../../lib/bitmap.c PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o util/bitmap.o -c $(KBITMAP_FLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
+util/hweight.o: ../../lib/hweight.c PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o util/hweight.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
+util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
perf-%$X: %.o $(PERFLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
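
A brief usage note on the new Makefile knob above: passing DEBUG=1 on the make command line leaves CFLAGS_OPTIMIZE empty, so perf is built without -O6 and is easier to step through in a debugger, while a plain make keeps the optimized build.
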
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 1ec741615814..6d63c2eea2c7 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -22,15 +22,13 @@
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/thread.h"
+#include "util/sort.h"
+#include "util/hist.h"
static char const *input_name = "perf.data";
-static char default_sort_order[] = "comm,symbol";
-static char *sort_order = default_sort_order;
-
static int force;
static int input;
-static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
static int full_paths;
@@ -39,9 +37,10 @@ static int print_line;
static unsigned long page_size;
static unsigned long mmap_window = 32;
-static struct rb_root threads;
-static struct thread *last_match;
-
+struct sym_hist {
+ u64 sum;
+ u64 ip[0];
+};
struct sym_ext {
struct rb_node node;
@@ -49,247 +48,33 @@ struct sym_ext {
char *path;
};
-/*
- * histogram, sorted on item, collects counts
- */
-
-static struct rb_root hist;
-
-struct hist_entry {
- struct rb_node rb_node;
-
- struct thread *thread;
- struct map *map;
- struct dso *dso;
- struct symbol *sym;
- u64 ip;
- char level;
-
- uint32_t count;
-};
-
-/*
- * configurable sorting bits
- */
-
-struct sort_entry {
- struct list_head list;
-
- const char *header;
-
- int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
- int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
- size_t (*print)(FILE *fp, struct hist_entry *);
-};
-
-/* --sort pid */
-
-static int64_t
-sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
-{
- return right->thread->pid - left->thread->pid;
-}
-
-static size_t
-sort__thread_print(FILE *fp, struct hist_entry *self)
-{
- return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
-}
-
-static struct sort_entry sort_thread = {
- .header = " Command: Pid",
- .cmp = sort__thread_cmp,
- .print = sort__thread_print,
-};
-
-/* --sort comm */
-
-static int64_t
-sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
-{
- return right->thread->pid - left->thread->pid;
-}
-
-static int64_t
-sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
-{
- char *comm_l = left->thread->comm;
- char *comm_r = right->thread->comm;
-
- if (!comm_l || !comm_r) {
- if (!comm_l && !comm_r)
- return 0;
- else if (!comm_l)
- return -1;
- else
- return 1;
- }
-
- return strcmp(comm_l, comm_r);
-}
-
-static size_t
-sort__comm_print(FILE *fp, struct hist_entry *self)
-{
- return fprintf(fp, "%16s", self->thread->comm);
-}
-
-static struct sort_entry sort_comm = {
- .header = " Command",
- .cmp = sort__comm_cmp,
- .collapse = sort__comm_collapse,
- .print = sort__comm_print,
-};
-
-/* --sort dso */
-
-static int64_t
-sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
-{
- struct dso *dso_l = left->dso;
- struct dso *dso_r = right->dso;
-
- if (!dso_l || !dso_r) {
- if (!dso_l && !dso_r)
- return 0;
- else if (!dso_l)
- return -1;
- else
- return 1;
- }
-
- return strcmp(dso_l->name, dso_r->name);
-}
-
-static size_t
-sort__dso_print(FILE *fp, struct hist_entry *self)
-{
- if (self->dso)
- return fprintf(fp, "%-25s", self->dso->name);
-
- return fprintf(fp, "%016llx ", (u64)self->ip);
-}
-
-static struct sort_entry sort_dso = {
- .header = "Shared Object ",
- .cmp = sort__dso_cmp,
- .print = sort__dso_print,
-};
-
-/* --sort symbol */
-
-static int64_t
-sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
-{
- u64 ip_l, ip_r;
-
- if (left->sym == right->sym)
- return 0;
-
- ip_l = left->sym ? left->sym->start : left->ip;
- ip_r = right->sym ? right->sym->start : right->ip;
-
- return (int64_t)(ip_r - ip_l);
-}
-
-static size_t
-sort__sym_print(FILE *fp, struct hist_entry *self)
-{
- size_t ret = 0;
-
- if (verbose)
- ret += fprintf(fp, "%#018llx ", (u64)self->ip);
-
- if (self->sym) {
- ret += fprintf(fp, "[%c] %s",
- self->dso == kernel_dso ? 'k' : '.', self->sym->name);
- } else {
- ret += fprintf(fp, "%#016llx", (u64)self->ip);
- }
-
- return ret;
-}
-
-static struct sort_entry sort_sym = {
- .header = "Symbol",
- .cmp = sort__sym_cmp,
- .print = sort__sym_print,
-};
-
-static int sort__need_collapse = 0;
-
-struct sort_dimension {
- const char *name;
- struct sort_entry *entry;
- int taken;
-};
-
-static struct sort_dimension sort_dimensions[] = {
- { .name = "pid", .entry = &sort_thread, },
- { .name = "comm", .entry = &sort_comm, },
- { .name = "dso", .entry = &sort_dso, },
- { .name = "symbol", .entry = &sort_sym, },
+struct sym_priv {
+ struct sym_hist *hist;
+ struct sym_ext *ext;
};
-static LIST_HEAD(hist_entry__sort_list);
+static const char *sym_hist_filter;
-static int sort_dimension__add(char *tok)
+static int symbol_filter(struct map *map, struct symbol *sym)
{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
- struct sort_dimension *sd = &sort_dimensions[i];
-
- if (sd->taken)
- continue;
-
- if (strncasecmp(tok, sd->name, strlen(tok)))
- continue;
-
- if (sd->entry->collapse)
- sort__need_collapse = 1;
-
- list_add_tail(&sd->entry->list, &hist_entry__sort_list);
- sd->taken = 1;
+ if (sym_hist_filter == NULL ||
+ strcmp(sym->name, sym_hist_filter) == 0) {
+ struct sym_priv *priv = dso__sym_priv(map->dso, sym);
+ const int size = (sizeof(*priv->hist) +
+ (sym->end - sym->start) * sizeof(u64));
+ priv->hist = malloc(size);
+ if (priv->hist)
+ memset(priv->hist, 0, size);
return 0;
}
-
- return -ESRCH;
-}
-
-static int64_t
-hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
-{
- struct sort_entry *se;
- int64_t cmp = 0;
-
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- cmp = se->cmp(left, right);
- if (cmp)
- break;
- }
-
- return cmp;
-}
-
-static int64_t
-hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
-{
- struct sort_entry *se;
- int64_t cmp = 0;
-
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- int64_t (*f)(struct hist_entry *, struct hist_entry *);
-
- f = se->collapse ?: se->cmp;
-
- cmp = f(left, right);
- if (cmp)
- break;
- }
-
- return cmp;
+ /*
+ * FIXME: We should really filter it out, as we don't want to go thru symbols
+ * we're not interested, and if a DSO ends up with no symbols, delete it too,
+ * but right now the kernel loading routines in symbol.c bail out if no symbols
+ * are found, fix it later.
+ */
+ return 0;
}
/*
@@ -299,196 +84,60 @@ static void hist_hit(struct hist_entry *he, u64 ip)
{
unsigned int sym_size, offset;
struct symbol *sym = he->sym;
+ struct sym_priv *priv;
+ struct sym_hist *h;
he->count++;
- if (!sym || !sym->hist)
+ if (!sym || !he->map)
+ return;
+
+ priv = dso__sym_priv(he->map->dso, sym);
+ if (!priv->hist)
return;
sym_size = sym->end - sym->start;
offset = ip - sym->start;
+ if (verbose)
+ fprintf(stderr, "%s: ip=%Lx\n", __func__,
+ he->map->unmap_ip(he->map, ip));
+
if (offset >= sym_size)
return;
- sym->hist_sum++;
- sym->hist[offset]++;
+ h = priv->hist;
+ h->sum++;
+ h->ip[offset]++;
if (verbose >= 3)
printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n",
(void *)(unsigned long)he->sym->start,
he->sym->name,
(void *)(unsigned long)ip, ip - he->sym->start,
- sym->hist[offset]);
+ h->ip[offset]);
}
-static int
-hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
- struct symbol *sym, u64 ip, char level)
+static int hist_entry__add(struct thread *thread, struct map *map,
+ struct symbol *sym, u64 ip, u64 count, char level)
{
- struct rb_node **p = &hist.rb_node;
- struct rb_node *parent = NULL;
- struct hist_entry *he;
- struct hist_entry entry = {
- .thread = thread,
- .map = map,
- .dso = dso,
- .sym = sym,
- .ip = ip,
- .level = level,
- .count = 1,
- };
- int cmp;
-
- while (*p != NULL) {
- parent = *p;
- he = rb_entry(parent, struct hist_entry, rb_node);
-
- cmp = hist_entry__cmp(&entry, he);
-
- if (!cmp) {
- hist_hit(he, ip);
-
- return 0;
- }
-
- if (cmp < 0)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- he = malloc(sizeof(*he));
- if (!he)
+ bool hit;
+ struct hist_entry *he = __hist_entry__add(thread, map, sym, NULL, ip,
+ count, level, &hit);
+ if (he == NULL)
return -ENOMEM;
- *he = entry;
- rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, &hist);
-
+ hist_hit(he, ip);
return 0;
}
-static void hist_entry__free(struct hist_entry *he)
-{
- free(he);
-}
-
-/*
- * collapse the histogram
- */
-
-static struct rb_root collapse_hists;
-
-static void collapse__insert_entry(struct hist_entry *he)
-{
- struct rb_node **p = &collapse_hists.rb_node;
- struct rb_node *parent = NULL;
- struct hist_entry *iter;
- int64_t cmp;
-
- while (*p != NULL) {
- parent = *p;
- iter = rb_entry(parent, struct hist_entry, rb_node);
-
- cmp = hist_entry__collapse(iter, he);
-
- if (!cmp) {
- iter->count += he->count;
- hist_entry__free(he);
- return;
- }
-
- if (cmp < 0)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, &collapse_hists);
-}
-
-static void collapse__resort(void)
-{
- struct rb_node *next;
- struct hist_entry *n;
-
- if (!sort__need_collapse)
- return;
-
- next = rb_first(&hist);
- while (next) {
- n = rb_entry(next, struct hist_entry, rb_node);
- next = rb_next(&n->rb_node);
-
- rb_erase(&n->rb_node, &hist);
- collapse__insert_entry(n);
- }
-}
-
-/*
- * reverse the map, sort on count.
- */
-
-static struct rb_root output_hists;
-
-static void output__insert_entry(struct hist_entry *he)
-{
- struct rb_node **p = &output_hists.rb_node;
- struct rb_node *parent = NULL;
- struct hist_entry *iter;
-
- while (*p != NULL) {
- parent = *p;
- iter = rb_entry(parent, struct hist_entry, rb_node);
-
- if (he->count > iter->count)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, &output_hists);
-}
-
-static void output__resort(void)
-{
- struct rb_node *next;
- struct hist_entry *n;
- struct rb_root *tree = &hist;
-
- if (sort__need_collapse)
- tree = &collapse_hists;
-
- next = rb_first(tree);
-
- while (next) {
- n = rb_entry(next, struct hist_entry, rb_node);
- next = rb_next(&n->rb_node);
-
- rb_erase(&n->rb_node, tree);
- output__insert_entry(n);
- }
-}
-
-static unsigned long total = 0,
- total_mmap = 0,
- total_comm = 0,
- total_fork = 0,
- total_unknown = 0;
-
static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
char level;
- int show = 0;
- struct dso *dso = NULL;
- struct thread *thread;
u64 ip = event->ip.ip;
struct map *map = NULL;
-
- thread = threads__findnew(event->ip.pid, &threads, &last_match);
+ struct symbol *sym = NULL;
+ struct thread *thread = threads__findnew(event->ip.pid);
dump_printf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
(void *)(offset + head),
@@ -497,60 +146,53 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
event->ip.pid,
(void *)(long)ip);
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
-
if (thread == NULL) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+
if (event->header.misc & PERF_RECORD_MISC_KERNEL) {
- show = SHOW_KERNEL;
level = 'k';
-
- dso = kernel_dso;
-
- dump_printf(" ...... dso: %s\n", dso->name);
-
+ sym = kernel_maps__find_symbol(ip, &map);
+ dump_printf(" ...... dso: %s\n",
+ map ? map->dso->long_name : "<not found>");
} else if (event->header.misc & PERF_RECORD_MISC_USER) {
-
- show = SHOW_USER;
level = '.';
-
map = thread__find_map(thread, ip);
if (map != NULL) {
+got_map:
ip = map->map_ip(map, ip);
- dso = map->dso;
+ sym = map->dso->find_symbol(map->dso, ip);
} else {
/*
* If this is outside of all known maps,
* and is a negative address, try to look it
* up in the kernel dso, as it might be a
- * vsyscall (which executes in user-mode):
+ * vsyscall or vdso (which executes in user-mode).
+ *
+ * XXX This is nasty, we should have a symbol list in
+ * the "[vdso]" dso, but for now lets use the old
+ * trick of looking in the whole kernel symbol list.
*/
- if ((long long)ip < 0)
- dso = kernel_dso;
+ if ((long long)ip < 0) {
+ map = kernel_map;
+ goto got_map;
+ }
}
- dump_printf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
-
+ dump_printf(" ...... dso: %s\n",
+ map ? map->dso->long_name : "<not found>");
} else {
- show = SHOW_HV;
level = 'H';
dump_printf(" ...... dso: [hypervisor]\n");
}
- if (show & show_mask) {
- struct symbol *sym = NULL;
-
- if (dso)
- sym = dso->find_symbol(dso, ip);
-
- if (hist_entry__add(thread, map, dso, sym, ip, level)) {
- fprintf(stderr,
- "problem incrementing symbol count, skipping event\n");
- return -1;
- }
+ if (hist_entry__add(thread, map, sym, ip, 1, level)) {
+ fprintf(stderr, "problem incrementing symbol count, "
+ "skipping event\n");
+ return -1;
}
total++;
@@ -560,10 +202,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread;
- struct map *map = map__new(&event->mmap, NULL, 0);
-
- thread = threads__findnew(event->mmap.pid, &threads, &last_match);
+ struct map *map = map__new(&event->mmap, NULL, 0,
+ sizeof(struct sym_priv), symbol_filter);
+ struct thread *thread = threads__findnew(event->mmap.pid);
dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n",
(void *)(offset + head),
@@ -588,9 +229,8 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread;
+ struct thread *thread = threads__findnew(event->comm.pid);
- thread = threads__findnew(event->comm.pid, &threads, &last_match);
dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
@@ -609,11 +249,9 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_fork_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread;
- struct thread *parent;
+ struct thread *thread = threads__findnew(event->fork.pid);
+ struct thread *parent = threads__findnew(event->fork.ppid);
- thread = threads__findnew(event->fork.pid, &threads, &last_match);
- parent = threads__findnew(event->fork.ppid, &threads, &last_match);
dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
@@ -665,14 +303,15 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
return 0;
}
-static int
-parse_line(FILE *file, struct symbol *sym, u64 start, u64 len)
+static int parse_line(FILE *file, struct hist_entry *he, u64 len)
{
+ struct symbol *sym = he->sym;
char *line = NULL, *tmp, *tmp2;
static const char *prev_line;
static const char *prev_color;
unsigned int offset;
size_t line_len;
+ u64 start;
s64 line_ip;
int ret;
char *c;
@@ -709,22 +348,26 @@ parse_line(FILE *file, struct symbol *sym, u64 start, u64 len)
line_ip = -1;
}
+ start = he->map->unmap_ip(he->map, sym->start);
+
if (line_ip != -1) {
const char *path = NULL;
unsigned int hits = 0;
double percent = 0.0;
const char *color;
- struct sym_ext *sym_ext = sym->priv;
+ struct sym_priv *priv = dso__sym_priv(he->map->dso, sym);
+ struct sym_ext *sym_ext = priv->ext;
+ struct sym_hist *h = priv->hist;
offset = line_ip - start;
if (offset < len)
- hits = sym->hist[offset];
+ hits = h->ip[offset];
if (offset < len && sym_ext) {
path = sym_ext[offset].path;
percent = sym_ext[offset].percent;
- } else if (sym->hist_sum)
- percent = 100.0 * hits / sym->hist_sum;
+ } else if (h->sum)
+ percent = 100.0 * hits / h->sum;
color = get_percent_color(percent);
@@ -777,9 +420,10 @@ static void insert_source_line(struct sym_ext *sym_ext)
rb_insert_color(&sym_ext->node, &root_sym_ext);
}
-static void free_source_line(struct symbol *sym, int len)
+static void free_source_line(struct hist_entry *he, int len)
{
- struct sym_ext *sym_ext = sym->priv;
+ struct sym_priv *priv = dso__sym_priv(he->map->dso, he->sym);
+ struct sym_ext *sym_ext = priv->ext;
int i;
if (!sym_ext)
@@ -789,26 +433,30 @@ static void free_source_line(struct symbol *sym, int len)
free(sym_ext[i].path);
free(sym_ext);
- sym->priv = NULL;
+ priv->ext = NULL;
root_sym_ext = RB_ROOT;
}
/* Get the filename:line for the colored entries */
static void
-get_source_line(struct symbol *sym, u64 start, int len, const char *filename)
+get_source_line(struct hist_entry *he, int len, const char *filename)
{
+ struct symbol *sym = he->sym;
+ u64 start;
int i;
char cmd[PATH_MAX * 2];
struct sym_ext *sym_ext;
+ struct sym_priv *priv = dso__sym_priv(he->map->dso, sym);
+ struct sym_hist *h = priv->hist;
- if (!sym->hist_sum)
+ if (!h->sum)
return;
- sym->priv = calloc(len, sizeof(struct sym_ext));
- if (!sym->priv)
+ sym_ext = priv->ext = calloc(len, sizeof(struct sym_ext));
+ if (!priv->ext)
return;
- sym_ext = sym->priv;
+ start = he->map->unmap_ip(he->map, sym->start);
for (i = 0; i < len; i++) {
char *path = NULL;
@@ -816,7 +464,7 @@ get_source_line(struct symbol *sym, u64 start, int len, const char *filename)
u64 offset;
FILE *fp;
- sym_ext[i].percent = 100.0 * sym->hist[i] / sym->hist_sum;
+ sym_ext[i].percent = 100.0 * h->ip[i] / h->sum;
if (sym_ext[i].percent <= 0.5)
continue;
@@ -870,33 +518,34 @@ static void print_summary(const char *filename)
}
}
-static void annotate_sym(struct dso *dso, struct symbol *sym)
+static void annotate_sym(struct hist_entry *he)
{
- const char *filename = dso->name, *d_filename;
- u64 start, end, len;
+ struct map *map = he->map;
+ struct dso *dso = map->dso;
+ struct symbol *sym = he->sym;
+ const char *filename = dso->long_name, *d_filename;
+ u64 len;
char command[PATH_MAX*2];
FILE *file;
if (!filename)
return;
- if (sym->module)
- filename = sym->module->path;
- else if (dso == kernel_dso)
- filename = vmlinux_name;
-
- start = sym->obj_start;
- if (!start)
- start = sym->start;
+
+ if (verbose)
+ fprintf(stderr, "%s: filename=%s, sym=%s, start=%Lx, end=%Lx\n",
+ __func__, filename, sym->name,
+ map->unmap_ip(map, sym->start),
+ map->unmap_ip(map, sym->end));
+
if (full_paths)
d_filename = filename;
else
d_filename = basename(filename);
- end = start + sym->end - sym->start + 1;
len = sym->end - sym->start;
if (print_line) {
- get_source_line(sym, start, len, filename);
+ get_source_line(he, len, filename);
print_summary(filename);
}
@@ -905,10 +554,12 @@ static void annotate_sym(struct dso *dso, struct symbol *sym)
printf("------------------------------------------------\n");
if (verbose >= 2)
- printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name);
+ printf("annotating [%p] %30s : [%p] %30s\n",
+ dso, dso->long_name, sym, sym->name);
sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s",
- (u64)start, (u64)end, filename, filename);
+ map->unmap_ip(map, sym->start), map->unmap_ip(map, sym->end),
+ filename, filename);
if (verbose >= 3)
printf("doing: %s\n", command);
@@ -918,35 +569,38 @@ static void annotate_sym(struct dso *dso, struct symbol *sym)
return;
while (!feof(file)) {
- if (parse_line(file, sym, start, len) < 0)
+ if (parse_line(file, he, len) < 0)
break;
}
pclose(file);
if (print_line)
- free_source_line(sym, len);
+ free_source_line(he, len);
}
static void find_annotations(void)
{
struct rb_node *nd;
- struct dso *dso;
- int count = 0;
- list_for_each_entry(dso, &dsos, node) {
+ for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
+ struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
+ struct sym_priv *priv;
- for (nd = rb_first(&dso->syms); nd; nd = rb_next(nd)) {
- struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
+ if (he->sym == NULL)
+ continue;
- if (sym->hist) {
- annotate_sym(dso, sym);
- count++;
- }
- }
- }
+ priv = dso__sym_priv(he->map->dso, he->sym);
+ if (priv->hist == NULL)
+ continue;
- if (!count)
- printf(" Error: symbol '%s' not present amongst the samples.\n", sym_hist_filter);
+ annotate_sym(he);
+ /*
+ * Since we have a hist_entry per IP for the same symbol, free
+ * he->sym->hist to signal we already processed this symbol.
+ */
+ free(priv->hist);
+ priv->hist = NULL;
+ }
}
static int __cmd_annotate(void)
@@ -959,7 +613,7 @@ static int __cmd_annotate(void)
uint32_t size;
char *buf;
- register_idle_thread(&threads, &last_match);
+ register_idle_thread();
input = open(input_name, O_RDONLY);
if (input < 0) {
@@ -983,7 +637,7 @@ static int __cmd_annotate(void)
exit(0);
}
- if (load_kernel() < 0) {
+ if (load_kernel(sizeof(struct sym_priv), symbol_filter) < 0) {
perror("failed to load kernel symbols");
return EXIT_FAILURE;
}
@@ -1059,14 +713,14 @@ more:
if (dump_trace)
return 0;
- if (verbose >= 3)
- threads__fprintf(stdout, &threads);
+ if (verbose > 3)
+ threads__fprintf(stdout);
- if (verbose >= 2)
+ if (verbose > 2)
dsos__fprintf(stdout);
collapse__resort();
- output__resort();
+ output__resort(total);
find_annotations();
@@ -1134,10 +788,13 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
sym_hist_filter = argv[0];
}
- if (!sym_hist_filter)
- usage_with_options(annotate_usage, options);
-
setup_pager();
+ if (field_sep && *field_sep == '.') {
+		fputs("'.' is the only invalid --field-separator argument\n",
+ stderr);
+ exit(129);
+ }
+
return __cmd_annotate();
}
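
For readers following the annotate rework above, here is a minimal standalone sketch of the per-line hit accounting that parse_line() performs (samples at an instruction offset as a fraction of the symbol's total); the struct and field names (sym_hist_sketch, ip[]) are illustrative stand-ins, not the perf structures.

/*
 * Minimal sketch, not the perf code itself: given a per-symbol hit
 * histogram indexed by instruction offset, compute the percentage a
 * single objdump line accounts for.
 */
#include <stdio.h>

struct sym_hist_sketch {
	unsigned long sum;	/* total samples in the symbol    */
	unsigned long ip[64];	/* samples per instruction offset */
};

static double line_percent(const struct sym_hist_sketch *h,
			   unsigned long line_ip, unsigned long sym_start,
			   unsigned long len)
{
	unsigned long offset = line_ip - sym_start;

	if (offset >= len || !h->sum)
		return 0.0;
	return 100.0 * h->ip[offset] / h->sum;
}

int main(void)
{
	struct sym_hist_sketch h = { .sum = 10 };

	h.ip[4] = 3;	/* 3 of 10 samples hit offset 4 */
	printf("%.1f%%\n", line_percent(&h, 0x1004, 0x1000, 64));
	return 0;
}
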
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3eeef339c787..ac5ddfff4456 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -17,55 +17,51 @@
#include "util/header.h"
#include "util/event.h"
#include "util/debug.h"
-#include "util/trace-event.h"
#include <unistd.h>
#include <sched.h>
-#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
-#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
-
static int fd[MAX_NR_CPUS][MAX_COUNTERS];
-static long default_interval = 100000;
+static long default_interval = 0;
-static int nr_cpus = 0;
+static int nr_cpus = 0;
static unsigned int page_size;
-static unsigned int mmap_pages = 128;
-static int freq = 0;
+static unsigned int mmap_pages = 128;
+static int freq = 1000;
static int output;
static const char *output_name = "perf.data";
-static int group = 0;
-static unsigned int realtime_prio = 0;
-static int raw_samples = 0;
-static int system_wide = 0;
-static int profile_cpu = -1;
-static pid_t target_pid = -1;
-static pid_t child_pid = -1;
-static int inherit = 1;
-static int force = 0;
-static int append_file = 0;
-static int call_graph = 0;
-static int inherit_stat = 0;
-static int no_samples = 0;
-static int sample_address = 0;
-static int multiplex = 0;
-static int multiplex_fd = -1;
-
-static long samples;
+static int group = 0;
+static unsigned int realtime_prio = 0;
+static int raw_samples = 0;
+static int system_wide = 0;
+static int profile_cpu = -1;
+static pid_t target_pid = -1;
+static pid_t child_pid = -1;
+static int inherit = 1;
+static int force = 0;
+static int append_file = 0;
+static int call_graph = 0;
+static int inherit_stat = 0;
+static int no_samples = 0;
+static int sample_address = 0;
+static int multiplex = 0;
+static int multiplex_fd = -1;
+
+static long samples = 0;
static struct timeval last_read;
static struct timeval this_read;
-static u64 bytes_written;
+static u64 bytes_written = 0;
static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
-static int nr_poll;
-static int nr_cpu;
+static int nr_poll = 0;
+static int nr_cpu = 0;
-static int file_new = 1;
+static int file_new = 1;
-struct perf_header *header;
+struct perf_header *header = NULL;
struct mmap_data {
int counter;
@@ -375,9 +371,11 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n
static void create_counter(int counter, int cpu, pid_t pid)
{
+ char *filter = filters[counter];
struct perf_event_attr *attr = attrs + counter;
struct perf_header_attr *h_attr;
int track = !counter; /* only the first counter needs these */
+ int ret;
struct {
u64 count;
u64 time_enabled;
@@ -480,7 +478,6 @@ try_again:
multiplex_fd = fd[nr_cpu][counter];
if (multiplex && fd[nr_cpu][counter] != multiplex_fd) {
- int ret;
ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd);
assert(ret != -1);
@@ -500,6 +497,16 @@ try_again:
}
}
+ if (filter != NULL) {
+ ret = ioctl(fd[nr_cpu][counter],
+ PERF_EVENT_IOC_SET_FILTER, filter);
+ if (ret) {
+ error("failed to set filter with %d (%s)\n", errno,
+ strerror(errno));
+ exit(-1);
+ }
+ }
+
ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE);
}
@@ -566,17 +573,17 @@ static int __cmd_record(int argc, const char **argv)
else
header = perf_header__new();
-
if (raw_samples) {
- read_tracing_data(attrs, nr_counters);
+ perf_header__feat_trace_info(header);
} else {
for (i = 0; i < nr_counters; i++) {
if (attrs[i].sample_type & PERF_SAMPLE_RAW) {
- read_tracing_data(attrs, nr_counters);
+ perf_header__feat_trace_info(header);
break;
}
}
}
+
atexit(atexit_header);
if (!system_wide) {
@@ -623,7 +630,7 @@ static int __cmd_record(int argc, const char **argv)
param.sched_priority = realtime_prio;
if (sched_setscheduler(0, SCHED_FIFO, &param)) {
- printf("Could not set realtime priority.\n");
+ pr_err("Could not set realtime priority.\n");
exit(-1);
}
}
@@ -677,6 +684,8 @@ static const struct option options[] = {
OPT_CALLBACK('e', "event", NULL, "event",
"event selector. use 'perf list' to list available events",
parse_events),
+ OPT_CALLBACK(0, "filter", NULL, "filter",
+ "event filter", parse_filter),
OPT_INTEGER('p', "pid", &target_pid,
"record events on existing pid"),
OPT_INTEGER('r', "realtime", &realtime_prio,
@@ -731,6 +740,18 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
attrs[0].config = PERF_COUNT_HW_CPU_CYCLES;
}
+ /*
+ * User specified count overrides default frequency.
+ */
+ if (default_interval)
+ freq = 0;
+ else if (freq) {
+ default_interval = freq;
+ } else {
+ fprintf(stderr, "frequency and count are zero, aborting\n");
+ exit(EXIT_FAILURE);
+ }
+
for (counter = 0; counter < nr_counters; counter++) {
if (attrs[counter].sample_period)
continue;
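
A small standalone sketch of the precedence rule introduced above for cmd_record: an explicit sample count overrides the frequency, and both being zero aborts. resolve_sampling() is a made-up helper name for illustration, not part of perf.

/*
 * Sketch of the precedence logic: -c <count> wins over -F <freq>;
 * if both are zero we refuse to record. Standalone illustration.
 */
#include <stdio.h>
#include <stdlib.h>

static void resolve_sampling(long *default_interval, int *freq)
{
	if (*default_interval)		/* user gave an explicit count */
		*freq = 0;
	else if (*freq)			/* otherwise sample by frequency */
		*default_interval = *freq;
	else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}
}

int main(void)
{
	long interval = 0;
	int freq = 1000;

	resolve_sampling(&interval, &freq);
	printf("interval=%ld freq=%d\n", interval, freq);
	return 0;
}
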
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 19669c20088e..b3d814b54555 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -26,20 +26,18 @@
#include "util/parse-options.h"
#include "util/parse-events.h"
+#include "util/data_map.h"
#include "util/thread.h"
+#include "util/sort.h"
+#include "util/hist.h"
static char const *input_name = "perf.data";
-static char default_sort_order[] = "comm,dso,symbol";
-static char *sort_order = default_sort_order;
static char *dso_list_str, *comm_list_str, *sym_list_str,
*col_width_list_str;
static struct strlist *dso_list, *comm_list, *sym_list;
-static char *field_sep;
static int force;
-static int input;
-static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
static int full_paths;
static int show_nr_samples;
@@ -50,374 +48,39 @@ static struct perf_read_values show_threads_values;
static char default_pretty_printing_style[] = "normal";
static char *pretty_printing_style = default_pretty_printing_style;
-static unsigned long page_size;
-static unsigned long mmap_window = 32;
-
-static char default_parent_pattern[] = "^sys_|^do_page_fault";
-static char *parent_pattern = default_parent_pattern;
-static regex_t parent_regex;
-
static int exclude_other = 1;
static char callchain_default_opt[] = "fractal,0.5";
-static int callchain;
-
-static char __cwd[PATH_MAX];
-static char *cwd = __cwd;
+static char *cwd;
static int cwdlen;
-static struct rb_root threads;
-static struct thread *last_match;
-
static struct perf_header *header;
-static
-struct callchain_param callchain_param = {
- .mode = CHAIN_GRAPH_REL,
- .min_percent = 0.5
-};
-
static u64 sample_type;
-static int repsep_fprintf(FILE *fp, const char *fmt, ...)
-{
- int n;
- va_list ap;
-
- va_start(ap, fmt);
- if (!field_sep)
- n = vfprintf(fp, fmt, ap);
- else {
- char *bf = NULL;
- n = vasprintf(&bf, fmt, ap);
- if (n > 0) {
- char *sep = bf;
-
- while (1) {
- sep = strchr(sep, *field_sep);
- if (sep == NULL)
- break;
- *sep = '.';
- }
- }
- fputs(bf, fp);
- free(bf);
- }
- va_end(ap);
- return n;
-}
-
-static unsigned int dsos__col_width,
- comms__col_width,
- threads__col_width;
-
-/*
- * histogram, sorted on item, collects counts
- */
-
-static struct rb_root hist;
-
-struct hist_entry {
- struct rb_node rb_node;
-
- struct thread *thread;
- struct map *map;
- struct dso *dso;
- struct symbol *sym;
- struct symbol *parent;
- u64 ip;
- char level;
- struct callchain_node callchain;
- struct rb_root sorted_chain;
-
- u64 count;
-};
-
-/*
- * configurable sorting bits
- */
-
-struct sort_entry {
- struct list_head list;
-
- const char *header;
-
- int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
- int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
- size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width);
- unsigned int *width;
- bool elide;
-};
-
-static int64_t cmp_null(void *l, void *r)
-{
- if (!l && !r)
- return 0;
- else if (!l)
- return -1;
- else
- return 1;
-}
-
-/* --sort pid */
-
-static int64_t
-sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
-{
- return right->thread->pid - left->thread->pid;
-}
-
-static size_t
-sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
-{
- return repsep_fprintf(fp, "%*s:%5d", width - 6,
- self->thread->comm ?: "", self->thread->pid);
-}
-
-static struct sort_entry sort_thread = {
- .header = "Command: Pid",
- .cmp = sort__thread_cmp,
- .print = sort__thread_print,
- .width = &threads__col_width,
-};
-
-/* --sort comm */
-
-static int64_t
-sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
-{
- return right->thread->pid - left->thread->pid;
-}
-
-static int64_t
-sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
-{
- char *comm_l = left->thread->comm;
- char *comm_r = right->thread->comm;
-
- if (!comm_l || !comm_r)
- return cmp_null(comm_l, comm_r);
-
- return strcmp(comm_l, comm_r);
-}
-
-static size_t
-sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
-{
- return repsep_fprintf(fp, "%*s", width, self->thread->comm);
-}
-
-static struct sort_entry sort_comm = {
- .header = "Command",
- .cmp = sort__comm_cmp,
- .collapse = sort__comm_collapse,
- .print = sort__comm_print,
- .width = &comms__col_width,
-};
-
-/* --sort dso */
-
-static int64_t
-sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
-{
- struct dso *dso_l = left->dso;
- struct dso *dso_r = right->dso;
-
- if (!dso_l || !dso_r)
- return cmp_null(dso_l, dso_r);
-
- return strcmp(dso_l->name, dso_r->name);
-}
-
-static size_t
-sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
-{
- if (self->dso)
- return repsep_fprintf(fp, "%-*s", width, self->dso->name);
-
- return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
-}
-
-static struct sort_entry sort_dso = {
- .header = "Shared Object",
- .cmp = sort__dso_cmp,
- .print = sort__dso_print,
- .width = &dsos__col_width,
-};
-
-/* --sort symbol */
-
-static int64_t
-sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
-{
- u64 ip_l, ip_r;
-
- if (left->sym == right->sym)
- return 0;
-
- ip_l = left->sym ? left->sym->start : left->ip;
- ip_r = right->sym ? right->sym->start : right->ip;
-
- return (int64_t)(ip_r - ip_l);
-}
static size_t
-sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
+callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
- size_t ret = 0;
+ int i;
+ int ret;
- if (verbose)
- ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip,
- dso__symtab_origin(self->dso));
+ ret = fprintf(fp, " ");
- ret += repsep_fprintf(fp, "[%c] ", self->level);
- if (self->sym) {
- ret += repsep_fprintf(fp, "%s", self->sym->name);
-
- if (self->sym->module)
- ret += repsep_fprintf(fp, "\t[%s]",
- self->sym->module->name);
- } else {
- ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
- }
+ for (i = 0; i < left_margin; i++)
+ ret += fprintf(fp, " ");
return ret;
}
-static struct sort_entry sort_sym = {
- .header = "Symbol",
- .cmp = sort__sym_cmp,
- .print = sort__sym_print,
-};
-
-/* --sort parent */
-
-static int64_t
-sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
-{
- struct symbol *sym_l = left->parent;
- struct symbol *sym_r = right->parent;
-
- if (!sym_l || !sym_r)
- return cmp_null(sym_l, sym_r);
-
- return strcmp(sym_l->name, sym_r->name);
-}
-
-static size_t
-sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
-{
- return repsep_fprintf(fp, "%-*s", width,
- self->parent ? self->parent->name : "[other]");
-}
-
-static unsigned int parent_symbol__col_width;
-
-static struct sort_entry sort_parent = {
- .header = "Parent symbol",
- .cmp = sort__parent_cmp,
- .print = sort__parent_print,
- .width = &parent_symbol__col_width,
-};
-
-static int sort__need_collapse = 0;
-static int sort__has_parent = 0;
-
-struct sort_dimension {
- const char *name;
- struct sort_entry *entry;
- int taken;
-};
-
-static struct sort_dimension sort_dimensions[] = {
- { .name = "pid", .entry = &sort_thread, },
- { .name = "comm", .entry = &sort_comm, },
- { .name = "dso", .entry = &sort_dso, },
- { .name = "symbol", .entry = &sort_sym, },
- { .name = "parent", .entry = &sort_parent, },
-};
-
-static LIST_HEAD(hist_entry__sort_list);
-
-static int sort_dimension__add(const char *tok)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
- struct sort_dimension *sd = &sort_dimensions[i];
-
- if (sd->taken)
- continue;
-
- if (strncasecmp(tok, sd->name, strlen(tok)))
- continue;
-
- if (sd->entry->collapse)
- sort__need_collapse = 1;
-
- if (sd->entry == &sort_parent) {
- int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
- if (ret) {
- char err[BUFSIZ];
-
- regerror(ret, &parent_regex, err, sizeof(err));
- fprintf(stderr, "Invalid regex: %s\n%s",
- parent_pattern, err);
- exit(-1);
- }
- sort__has_parent = 1;
- }
-
- list_add_tail(&sd->entry->list, &hist_entry__sort_list);
- sd->taken = 1;
-
- return 0;
- }
-
- return -ESRCH;
-}
-
-static int64_t
-hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
-{
- struct sort_entry *se;
- int64_t cmp = 0;
-
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- cmp = se->cmp(left, right);
- if (cmp)
- break;
- }
-
- return cmp;
-}
-
-static int64_t
-hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
-{
- struct sort_entry *se;
- int64_t cmp = 0;
-
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- int64_t (*f)(struct hist_entry *, struct hist_entry *);
-
- f = se->collapse ?: se->cmp;
-
- cmp = f(left, right);
- if (cmp)
- break;
- }
-
- return cmp;
-}
-
-static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask)
+static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
+ int left_margin)
{
int i;
size_t ret = 0;
- ret += fprintf(fp, "%s", " ");
+ ret += callchain__fprintf_left_margin(fp, left_margin);
for (i = 0; i < depth; i++)
if (depth_mask & (1 << i))
@@ -432,12 +95,12 @@ static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask)
static size_t
ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth,
int depth_mask, int count, u64 total_samples,
- int hits)
+ int hits, int left_margin)
{
int i;
size_t ret = 0;
- ret += fprintf(fp, "%s", " ");
+ ret += callchain__fprintf_left_margin(fp, left_margin);
for (i = 0; i < depth; i++) {
if (depth_mask & (1 << i))
ret += fprintf(fp, "|");
@@ -475,8 +138,9 @@ static void init_rem_hits(void)
}
static size_t
-callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
- u64 total_samples, int depth, int depth_mask)
+__callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
+ u64 total_samples, int depth, int depth_mask,
+ int left_margin)
{
struct rb_node *node, *next;
struct callchain_node *child;
@@ -517,7 +181,8 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
	 * But we keep the older depth mask for the line separator
* to keep the level link until we reach the last child
*/
- ret += ipchain__fprintf_graph_line(fp, depth, depth_mask);
+ ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
+ left_margin);
i = 0;
list_for_each_entry(chain, &child->val, list) {
if (chain->ip >= PERF_CONTEXT_MAX)
@@ -525,11 +190,13 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
ret += ipchain__fprintf_graph(fp, chain, depth,
new_depth_mask, i++,
new_total,
- cumul);
+ cumul,
+ left_margin);
}
- ret += callchain__fprintf_graph(fp, child, new_total,
- depth + 1,
- new_depth_mask | (1 << depth));
+ ret += __callchain__fprintf_graph(fp, child, new_total,
+ depth + 1,
+ new_depth_mask | (1 << depth),
+ left_margin);
node = next;
}
@@ -543,12 +210,51 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
new_depth_mask, 0, new_total,
- remaining);
+ remaining, left_margin);
}
return ret;
}
+
+static size_t
+callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
+ u64 total_samples, int left_margin)
+{
+ struct callchain_list *chain;
+ bool printed = false;
+ int i = 0;
+ int ret = 0;
+
+ list_for_each_entry(chain, &self->val, list) {
+ if (chain->ip >= PERF_CONTEXT_MAX)
+ continue;
+
+ if (!i++ && sort__first_dimension == SORT_SYM)
+ continue;
+
+ if (!printed) {
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "|\n");
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "---");
+
+ left_margin += 3;
+ printed = true;
+ } else
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+
+ if (chain->sym)
+ ret += fprintf(fp, " %s\n", chain->sym->name);
+ else
+ ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
+ }
+
+ ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);
+
+ return ret;
+}
+
static size_t
callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
u64 total_samples)
@@ -577,7 +283,7 @@ callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
static size_t
hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
- u64 total_samples)
+ u64 total_samples, int left_margin)
{
struct rb_node *rb_node;
struct callchain_node *chain;
@@ -597,8 +303,8 @@ hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
break;
case CHAIN_GRAPH_ABS: /* Falldown */
case CHAIN_GRAPH_REL:
- ret += callchain__fprintf_graph(fp, chain,
- total_samples, 1, 1);
+ ret += callchain__fprintf_graph(fp, chain, total_samples,
+ left_margin);
case CHAIN_NONE:
default:
break;
@@ -610,7 +316,6 @@ hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
return ret;
}
-
static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
{
@@ -644,8 +349,19 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
ret += fprintf(fp, "\n");
- if (callchain)
- hist_entry_callchain__fprintf(fp, self, total_samples);
+ if (callchain) {
+ int left_margin = 0;
+
+ if (sort__first_dimension == SORT_COMM) {
+ se = list_first_entry(&hist_entry__sort_list, typeof(*se),
+ list);
+ left_margin = se->width ? *se->width : 0;
+ left_margin -= thread__comm_len(self->thread);
+ }
+
+ hist_entry_callchain__fprintf(fp, self, total_samples,
+ left_margin);
+ }
return ret;
}
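
A hedged sketch of the left-margin computation used above when the first sort key is the comm column: the callchain graph is indented by the column width minus the printed comm length. callchain_left_margin() and the widths below are illustrative only.

/*
 * Standalone illustration: indent the callchain so it lines up under
 * the comm column rather than at column zero.
 */
#include <stdio.h>
#include <string.h>

static int callchain_left_margin(int comm_col_width, const char *comm)
{
	int margin = comm_col_width - (int)strlen(comm);

	return margin > 0 ? margin : 0;
}

int main(void)
{
	printf("%d\n", callchain_left_margin(16, "firefox"));	/* 9 */
	return 0;
}
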
@@ -695,22 +411,17 @@ static int thread__set_comm_adjust(struct thread *self, const char *comm)
static struct symbol *
-resolve_symbol(struct thread *thread, struct map **mapp,
- struct dso **dsop, u64 *ipp)
+resolve_symbol(struct thread *thread, struct map **mapp, u64 *ipp)
{
- struct dso *dso = dsop ? *dsop : NULL;
struct map *map = mapp ? *mapp : NULL;
u64 ip = *ipp;
- if (!thread)
- return NULL;
-
- if (dso)
- goto got_dso;
-
if (map)
goto got_map;
+ if (!thread)
+ return NULL;
+
map = thread__find_map(thread, ip);
if (map != NULL) {
/*
@@ -725,29 +436,26 @@ resolve_symbol(struct thread *thread, struct map **mapp,
*mapp = map;
got_map:
ip = map->map_ip(map, ip);
-
- dso = map->dso;
} else {
/*
* If this is outside of all known maps,
* and is a negative address, try to look it
* up in the kernel dso, as it might be a
- * vsyscall (which executes in user-mode):
+ * vsyscall or vdso (which executes in user-mode).
+ *
+ * XXX This is nasty, we should have a symbol list in
+		 * the "[vdso]" dso, but for now let's use the old
+ * trick of looking in the whole kernel symbol list.
*/
if ((long long)ip < 0)
- dso = kernel_dso;
+ return kernel_maps__find_symbol(ip, mapp);
}
- dump_printf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
+ dump_printf(" ...... dso: %s\n",
+ map ? map->dso->long_name : "<not found>");
dump_printf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
*ipp = ip;
- if (dsop)
- *dsop = dso;
-
- if (!dso)
- return NULL;
-got_dso:
- return dso->find_symbol(dso, ip);
+ return map ? map->dso->find_symbol(map->dso, ip) : NULL;
}
static int call__match(struct symbol *sym)
@@ -758,9 +466,9 @@ static int call__match(struct symbol *sym)
return 0;
}
-static struct symbol **
-resolve_callchain(struct thread *thread, struct map *map __used,
- struct ip_callchain *chain, struct hist_entry *entry)
+static struct symbol **resolve_callchain(struct thread *thread, struct map *map,
+ struct ip_callchain *chain,
+ struct symbol **parent)
{
u64 context = PERF_CONTEXT_MAX;
struct symbol **syms = NULL;
@@ -776,8 +484,7 @@ resolve_callchain(struct thread *thread, struct map *map __used,
for (i = 0; i < chain->nr; i++) {
u64 ip = chain->ips[i];
- struct dso *dso = NULL;
- struct symbol *sym;
+ struct symbol *sym = NULL;
if (ip >= PERF_CONTEXT_MAX) {
context = ip;
@@ -786,21 +493,18 @@ resolve_callchain(struct thread *thread, struct map *map __used,
switch (context) {
case PERF_CONTEXT_HV:
- dso = hypervisor_dso;
break;
case PERF_CONTEXT_KERNEL:
- dso = kernel_dso;
+ sym = kernel_maps__find_symbol(ip, &map);
break;
default:
+ sym = resolve_symbol(thread, &map, &ip);
break;
}
- sym = resolve_symbol(thread, NULL, &dso, &ip);
-
if (sym) {
- if (sort__has_parent && call__match(sym) &&
- !entry->parent)
- entry->parent = sym;
+ if (sort__has_parent && !*parent && call__match(sym))
+ *parent = sym;
if (!callchain)
break;
syms[i] = sym;
@@ -815,177 +519,35 @@ resolve_callchain(struct thread *thread, struct map *map __used,
*/
static int
-hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
+hist_entry__add(struct thread *thread, struct map *map,
struct symbol *sym, u64 ip, struct ip_callchain *chain,
char level, u64 count)
{
- struct rb_node **p = &hist.rb_node;
- struct rb_node *parent = NULL;
+ struct symbol **syms = NULL, *parent = NULL;
+ bool hit;
struct hist_entry *he;
- struct symbol **syms = NULL;
- struct hist_entry entry = {
- .thread = thread,
- .map = map,
- .dso = dso,
- .sym = sym,
- .ip = ip,
- .level = level,
- .count = count,
- .parent = NULL,
- .sorted_chain = RB_ROOT
- };
- int cmp;
if ((sort__has_parent || callchain) && chain)
- syms = resolve_callchain(thread, map, chain, &entry);
-
- while (*p != NULL) {
- parent = *p;
- he = rb_entry(parent, struct hist_entry, rb_node);
-
- cmp = hist_entry__cmp(&entry, he);
+ syms = resolve_callchain(thread, map, chain, &parent);
- if (!cmp) {
- he->count += count;
- if (callchain) {
- append_chain(&he->callchain, chain, syms);
- free(syms);
- }
- return 0;
- }
+ he = __hist_entry__add(thread, map, sym, parent,
+ ip, count, level, &hit);
+ if (he == NULL)
+ return -ENOMEM;
- if (cmp < 0)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
+ if (hit)
+ he->count += count;
- he = malloc(sizeof(*he));
- if (!he)
- return -ENOMEM;
- *he = entry;
if (callchain) {
- callchain_init(&he->callchain);
+ if (!hit)
+ callchain_init(&he->callchain);
append_chain(&he->callchain, chain, syms);
free(syms);
}
- rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, &hist);
return 0;
}
-static void hist_entry__free(struct hist_entry *he)
-{
- free(he);
-}
-
-/*
- * collapse the histogram
- */
-
-static struct rb_root collapse_hists;
-
-static void collapse__insert_entry(struct hist_entry *he)
-{
- struct rb_node **p = &collapse_hists.rb_node;
- struct rb_node *parent = NULL;
- struct hist_entry *iter;
- int64_t cmp;
-
- while (*p != NULL) {
- parent = *p;
- iter = rb_entry(parent, struct hist_entry, rb_node);
-
- cmp = hist_entry__collapse(iter, he);
-
- if (!cmp) {
- iter->count += he->count;
- hist_entry__free(he);
- return;
- }
-
- if (cmp < 0)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, &collapse_hists);
-}
-
-static void collapse__resort(void)
-{
- struct rb_node *next;
- struct hist_entry *n;
-
- if (!sort__need_collapse)
- return;
-
- next = rb_first(&hist);
- while (next) {
- n = rb_entry(next, struct hist_entry, rb_node);
- next = rb_next(&n->rb_node);
-
- rb_erase(&n->rb_node, &hist);
- collapse__insert_entry(n);
- }
-}
-
-/*
- * reverse the map, sort on count.
- */
-
-static struct rb_root output_hists;
-
-static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
-{
- struct rb_node **p = &output_hists.rb_node;
- struct rb_node *parent = NULL;
- struct hist_entry *iter;
-
- if (callchain)
- callchain_param.sort(&he->sorted_chain, &he->callchain,
- min_callchain_hits, &callchain_param);
-
- while (*p != NULL) {
- parent = *p;
- iter = rb_entry(parent, struct hist_entry, rb_node);
-
- if (he->count > iter->count)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, &output_hists);
-}
-
-static void output__resort(u64 total_samples)
-{
- struct rb_node *next;
- struct hist_entry *n;
- struct rb_root *tree = &hist;
- u64 min_callchain_hits;
-
- min_callchain_hits = total_samples * (callchain_param.min_percent / 100);
-
- if (sort__need_collapse)
- tree = &collapse_hists;
-
- next = rb_first(tree);
-
- while (next) {
- n = rb_entry(next, struct hist_entry, rb_node);
- next = rb_next(&n->rb_node);
-
- rb_erase(&n->rb_node, tree);
- output__insert_entry(n, min_callchain_hits);
- }
-}
-
static size_t output__fprintf(FILE *fp, u64 total_samples)
{
struct hist_entry *pos;
@@ -1080,13 +642,6 @@ print_entries:
return ret;
}
-static unsigned long total = 0,
- total_mmap = 0,
- total_comm = 0,
- total_fork = 0,
- total_unknown = 0,
- total_lost = 0;
-
static int validate_chain(struct ip_callchain *chain, event_t *event)
{
unsigned int chain_size;
@@ -1104,17 +659,14 @@ static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
char level;
- int show = 0;
- struct dso *dso = NULL;
- struct thread *thread;
+ struct symbol *sym = NULL;
u64 ip = event->ip.ip;
u64 period = 1;
struct map *map = NULL;
void *more_data = event->ip.__more_data;
struct ip_callchain *chain = NULL;
int cpumode;
-
- thread = threads__findnew(event->ip.pid, &threads, &last_match);
+ struct thread *thread = threads__findnew(event->ip.pid);
if (sample_type & PERF_SAMPLE_PERIOD) {
period = *(u64 *)more_data;
@@ -1137,7 +689,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
dump_printf("... chain: nr:%Lu\n", chain->nr);
if (validate_chain(chain, event) < 0) {
- eprintf("call-chain problem with event, skipping it.\n");
+ pr_debug("call-chain problem with event, "
+ "skipping it.\n");
return 0;
}
@@ -1147,56 +700,49 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
}
}
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
-
if (thread == NULL) {
- eprintf("problem processing %d event, skipping it.\n",
+ pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+
if (comm_list && !strlist__has_entry(comm_list, thread->comm))
return 0;
cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
if (cpumode == PERF_RECORD_MISC_KERNEL) {
- show = SHOW_KERNEL;
level = 'k';
-
- dso = kernel_dso;
-
- dump_printf(" ...... dso: %s\n", dso->name);
-
+ sym = kernel_maps__find_symbol(ip, &map);
+ dump_printf(" ...... dso: %s\n",
+ map ? map->dso->long_name : "<not found>");
} else if (cpumode == PERF_RECORD_MISC_USER) {
-
- show = SHOW_USER;
level = '.';
+ sym = resolve_symbol(thread, &map, &ip);
} else {
- show = SHOW_HV;
level = 'H';
-
- dso = hypervisor_dso;
-
dump_printf(" ...... dso: [hypervisor]\n");
}
- if (show & show_mask) {
- struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);
-
- if (dso_list && (!dso || !dso->name ||
- !strlist__has_entry(dso_list, dso->name)))
- return 0;
+ if (dso_list &&
+ (!map || !map->dso ||
+ !(strlist__has_entry(dso_list, map->dso->short_name) ||
+ (map->dso->short_name != map->dso->long_name &&
+ strlist__has_entry(dso_list, map->dso->long_name)))))
+ return 0;
- if (sym_list && (!sym || !strlist__has_entry(sym_list, sym->name)))
- return 0;
+ if (sym_list && sym && !strlist__has_entry(sym_list, sym->name))
+ return 0;
- if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
- eprintf("problem incrementing symbol count, skipping event\n");
- return -1;
- }
+ if (hist_entry__add(thread, map, sym, ip,
+ chain, level, period)) {
+ pr_debug("problem incrementing symbol count, skipping event\n");
+ return -1;
}
+
total += period;
return 0;
@@ -1205,10 +751,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread;
- struct map *map = map__new(&event->mmap, cwd, cwdlen);
-
- thread = threads__findnew(event->mmap.pid, &threads, &last_match);
+ struct map *map = map__new(&event->mmap, cwd, cwdlen, 0, NULL);
+ struct thread *thread = threads__findnew(event->mmap.pid);
dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n",
(void *)(offset + head),
@@ -1234,9 +778,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread;
-
- thread = threads__findnew(event->comm.pid, &threads, &last_match);
+ struct thread *thread = threads__findnew(event->comm.pid);
dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
(void *)(offset + head),
@@ -1256,11 +798,8 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_task_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread;
- struct thread *parent;
-
- thread = threads__findnew(event->fork.pid, &threads, &last_match);
- parent = threads__findnew(event->fork.ppid, &threads, &last_match);
+ struct thread *thread = threads__findnew(event->fork.pid);
+ struct thread *parent = threads__findnew(event->fork.ppid);
dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n",
(void *)(offset + head),
@@ -1331,216 +870,79 @@ process_read_event(event_t *event, unsigned long offset, unsigned long head)
return 0;
}
-static int
-process_event(event_t *event, unsigned long offset, unsigned long head)
-{
- trace_event(event);
-
- switch (event->header.type) {
- case PERF_RECORD_SAMPLE:
- return process_sample_event(event, offset, head);
-
- case PERF_RECORD_MMAP:
- return process_mmap_event(event, offset, head);
-
- case PERF_RECORD_COMM:
- return process_comm_event(event, offset, head);
-
- case PERF_RECORD_FORK:
- case PERF_RECORD_EXIT:
- return process_task_event(event, offset, head);
-
- case PERF_RECORD_LOST:
- return process_lost_event(event, offset, head);
-
- case PERF_RECORD_READ:
- return process_read_event(event, offset, head);
-
- /*
- * We dont process them right now but they are fine:
- */
-
- case PERF_RECORD_THROTTLE:
- case PERF_RECORD_UNTHROTTLE:
- return 0;
-
- default:
- return -1;
- }
-
- return 0;
-}
-
-static int __cmd_report(void)
+static int sample_type_check(u64 type)
{
- int ret, rc = EXIT_FAILURE;
- unsigned long offset = 0;
- unsigned long head, shift;
- struct stat input_stat;
- struct thread *idle;
- event_t *event;
- uint32_t size;
- char *buf;
-
- idle = register_idle_thread(&threads, &last_match);
- thread__comm_adjust(idle);
-
- if (show_threads)
- perf_read_values_init(&show_threads_values);
-
- input = open(input_name, O_RDONLY);
- if (input < 0) {
- fprintf(stderr, " failed to open file: %s", input_name);
- if (!strcmp(input_name, "perf.data"))
- fprintf(stderr, " (try 'perf record' first)");
- fprintf(stderr, "\n");
- exit(-1);
- }
-
- ret = fstat(input, &input_stat);
- if (ret < 0) {
- perror("failed to stat file");
- exit(-1);
- }
-
- if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
- fprintf(stderr, "file: %s not owned by current user or root\n", input_name);
- exit(-1);
- }
-
- if (!input_stat.st_size) {
- fprintf(stderr, "zero-sized file, nothing to do!\n");
- exit(0);
- }
-
- header = perf_header__read(input);
- head = header->data_offset;
-
- sample_type = perf_header__sample_type(header);
+ sample_type = type;
if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
fprintf(stderr, "selected --sort parent, but no"
" callchain data. Did you call"
" perf record without -g?\n");
- exit(-1);
+ return -1;
}
if (callchain) {
fprintf(stderr, "selected -g but no callchain data."
" Did you call perf record without"
" -g?\n");
- exit(-1);
+ return -1;
}
} else if (callchain_param.mode != CHAIN_NONE && !callchain) {
callchain = 1;
if (register_callchain_param(&callchain_param) < 0) {
fprintf(stderr, "Can't register callchain"
" params\n");
- exit(-1);
+ return -1;
}
}
- if (load_kernel() < 0) {
- perror("failed to load kernel symbols");
- return EXIT_FAILURE;
- }
-
- if (!full_paths) {
- if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
- perror("failed to get the current directory");
- return EXIT_FAILURE;
- }
- cwdlen = strlen(cwd);
- } else {
- cwd = NULL;
- cwdlen = 0;
- }
-
- shift = page_size * (head / page_size);
- offset += shift;
- head -= shift;
-
-remap:
- buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
- MAP_SHARED, input, offset);
- if (buf == MAP_FAILED) {
- perror("failed to mmap file");
- exit(-1);
- }
-
-more:
- event = (event_t *)(buf + head);
-
- size = event->header.size;
- if (!size)
- size = 8;
-
- if (head + event->header.size >= page_size * mmap_window) {
- int munmap_ret;
-
- shift = page_size * (head / page_size);
-
- munmap_ret = munmap(buf, page_size * mmap_window);
- assert(munmap_ret == 0);
-
- offset += shift;
- head -= shift;
- goto remap;
- }
-
- size = event->header.size;
-
- dump_printf("\n%p [%p]: event: %d\n",
- (void *)(offset + head),
- (void *)(long)event->header.size,
- event->header.type);
-
- if (!size || process_event(event, offset, head) < 0) {
-
- dump_printf("%p [%p]: skipping unknown header type: %d\n",
- (void *)(offset + head),
- (void *)(long)(event->header.size),
- event->header.type);
-
- total_unknown++;
+ return 0;
+}
- /*
- * assume we lost track of the stream, check alignment, and
- * increment a single u64 in the hope to catch on again 'soon'.
- */
+static struct perf_file_handler file_handler = {
+ .process_sample_event = process_sample_event,
+ .process_mmap_event = process_mmap_event,
+ .process_comm_event = process_comm_event,
+ .process_exit_event = process_task_event,
+ .process_fork_event = process_task_event,
+ .process_lost_event = process_lost_event,
+ .process_read_event = process_read_event,
+ .sample_type_check = sample_type_check,
+};
- if (unlikely(head & 7))
- head &= ~7ULL;
- size = 8;
- }
+static int __cmd_report(void)
+{
+ struct thread *idle;
+ int ret;
- head += size;
+ idle = register_idle_thread();
+ thread__comm_adjust(idle);
- if (offset + head >= header->data_offset + header->data_size)
- goto done;
+ if (show_threads)
+ perf_read_values_init(&show_threads_values);
- if (offset + head < (unsigned long)input_stat.st_size)
- goto more;
+ register_perf_file_handler(&file_handler);
-done:
- rc = EXIT_SUCCESS;
- close(input);
+ ret = mmap_dispatch_perf_file(&header, input_name, force, full_paths,
+ &cwdlen, &cwd);
+ if (ret)
+ return ret;
dump_printf(" IP events: %10ld\n", total);
dump_printf(" mmap events: %10ld\n", total_mmap);
dump_printf(" comm events: %10ld\n", total_comm);
dump_printf(" fork events: %10ld\n", total_fork);
dump_printf(" lost events: %10ld\n", total_lost);
- dump_printf(" unknown events: %10ld\n", total_unknown);
+ dump_printf(" unknown events: %10ld\n", file_handler.total_unknown);
if (dump_trace)
return 0;
- if (verbose >= 3)
- threads__fprintf(stdout, &threads);
+ if (verbose > 3)
+ threads__fprintf(stdout);
- if (verbose >= 2)
+ if (verbose > 2)
dsos__fprintf(stdout);
collapse__resort();
@@ -1550,7 +952,7 @@ done:
if (show_threads)
perf_read_values_destroy(&show_threads_values);
- return rc;
+ return ret;
}
static int
@@ -1606,7 +1008,8 @@ setup:
return 0;
}
-static const char * const report_usage[] = {
+//static const char * const report_usage[] = {
+const char * const report_usage[] = {
"perf report [<options>] <command>",
NULL
};
@@ -1692,8 +1095,6 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
{
symbol__init();
- page_size = getpagesize();
-
argc = parse_options(argc, argv, options, report_usage, 0);
setup_sorting();
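
To illustrate the callback-table pattern that builtin-report now relies on (a perf_file_handler filled with per-record callbacks and handed to a shared dispatcher), here is a simplified, self-contained sketch; the types and dispatcher below are stand-ins, not the actual data_map API.

/*
 * Sketch only: each builtin registers a table of handlers and a
 * common loop dispatches records by type.
 */
#include <stdio.h>

typedef struct { int type; } event_t_sketch;

struct file_handler_sketch {
	int (*process_sample)(event_t_sketch *ev);
	int (*process_comm)(event_t_sketch *ev);
};

static int dispatch(struct file_handler_sketch *h, event_t_sketch *ev)
{
	switch (ev->type) {
	case 0: return h->process_sample ? h->process_sample(ev) : 0;
	case 1: return h->process_comm ? h->process_comm(ev) : 0;
	default: return -1;	/* unknown record type */
	}
}

static int on_sample(event_t_sketch *ev) { (void)ev; puts("sample"); return 0; }

int main(void)
{
	struct file_handler_sketch h = { .process_sample = on_sample };
	event_t_sketch ev = { .type = 0 };

	return dispatch(&h, &ev);
}
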
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index ce2d5be4f30e..9a48d9626be4 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -11,6 +11,7 @@
#include "util/trace-event.h"
#include "util/debug.h"
+#include "util/data_map.h"
#include <sys/types.h>
#include <sys/prctl.h>
@@ -20,26 +21,23 @@
#include <math.h>
static char const *input_name = "perf.data";
-static int input;
-static unsigned long page_size;
-static unsigned long mmap_window = 32;
static unsigned long total_comm = 0;
-static struct rb_root threads;
-static struct thread *last_match;
-
static struct perf_header *header;
static u64 sample_type;
static char default_sort_order[] = "avg, max, switch, runtime";
static char *sort_order = default_sort_order;
+static int profile_cpu = -1;
+
+static char *cwd;
+static int cwdlen;
+
#define PR_SET_NAME 15 /* Set process name */
#define MAX_CPUS 4096
-#define BUG_ON(x) assert(!(x))
-
static u64 run_measurement_overhead;
static u64 sleep_measurement_overhead;
@@ -74,6 +72,7 @@ enum sched_event_type {
SCHED_EVENT_RUN,
SCHED_EVENT_SLEEP,
SCHED_EVENT_WAKEUP,
+ SCHED_EVENT_MIGRATION,
};
struct sched_atom {
@@ -398,6 +397,8 @@ process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
ret = sem_post(atom->wait_sem);
BUG_ON(ret);
break;
+ case SCHED_EVENT_MIGRATION:
+ break;
default:
BUG_ON(1);
}
@@ -635,9 +636,7 @@ static void test_calibrations(void)
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread;
-
- thread = threads__findnew(event->comm.pid, &threads, &last_match);
+ struct thread *thread = threads__findnew(event->comm.tid);
dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
(void *)(offset + head),
@@ -745,6 +744,22 @@ struct trace_fork_event {
u32 child_pid;
};
+struct trace_migrate_task_event {
+ u32 size;
+
+ u16 common_type;
+ u8 common_flags;
+ u8 common_preempt_count;
+ u32 common_pid;
+ u32 common_tgid;
+
+ char comm[16];
+ u32 pid;
+
+ u32 prio;
+ u32 cpu;
+};
+
struct trace_sched_handler {
void (*switch_event)(struct trace_switch_event *,
struct event *,
@@ -769,6 +784,12 @@ struct trace_sched_handler {
int cpu,
u64 timestamp,
struct thread *thread);
+
+ void (*migrate_task_event)(struct trace_migrate_task_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
};
@@ -1058,8 +1079,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
die("hm, delta: %Ld < 0 ?\n", delta);
- sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
- sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
+ sched_out = threads__findnew(switch_event->prev_pid);
+ sched_in = threads__findnew(switch_event->next_pid);
out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
if (!out_events) {
@@ -1092,13 +1113,10 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
u64 timestamp,
struct thread *this_thread __used)
{
- struct work_atoms *atoms;
- struct thread *thread;
+ struct thread *thread = threads__findnew(runtime_event->pid);
+ struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
-
- thread = threads__findnew(runtime_event->pid, &threads, &last_match);
- atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
if (!atoms) {
thread_atoms_insert(thread);
atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
@@ -1125,7 +1143,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
if (!wakeup_event->success)
return;
- wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
+ wakee = threads__findnew(wakeup_event->pid);
atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
if (!atoms) {
thread_atoms_insert(wakee);
@@ -1139,7 +1157,12 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
atom = list_entry(atoms->work_list.prev, struct work_atom, list);
- if (atom->state != THREAD_SLEEPING)
+ /*
+ * You WILL be missing events if you've recorded only
+	 * one CPU, or are only looking at one, so don't
+ * make useless noise.
+ */
+ if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
nr_state_machine_bugs++;
nr_timestamps++;
@@ -1152,11 +1175,51 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
atom->wake_up_time = timestamp;
}
+static void
+latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct work_atoms *atoms;
+ struct work_atom *atom;
+ struct thread *migrant;
+
+ /*
+ * Only need to worry about migration when profiling one CPU.
+ */
+ if (profile_cpu == -1)
+ return;
+
+ migrant = threads__findnew(migrate_task_event->pid);
+ atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
+ if (!atoms) {
+ thread_atoms_insert(migrant);
+ register_pid(migrant->pid, migrant->comm);
+ atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
+ if (!atoms)
+ die("migration-event: Internal tree error");
+ add_sched_out_event(atoms, 'R', timestamp);
+ }
+
+ BUG_ON(list_empty(&atoms->work_list));
+
+ atom = list_entry(atoms->work_list.prev, struct work_atom, list);
+ atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
+
+ nr_timestamps++;
+
+ if (atom->sched_out_time > timestamp)
+ nr_unordered_timestamps++;
+}
+
static struct trace_sched_handler lat_ops = {
.wakeup_event = latency_wakeup_event,
.switch_event = latency_switch_event,
.runtime_event = latency_runtime_event,
.fork_event = latency_fork_event,
+ .migrate_task_event = latency_migrate_task_event,
};
static void output_lat_thread(struct work_atoms *work_list)
@@ -1385,8 +1448,8 @@ map_switch_event(struct trace_switch_event *switch_event,
die("hm, delta: %Ld < 0 ?\n", delta);
- sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
- sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
+ sched_out = threads__findnew(switch_event->prev_pid);
+ sched_in = threads__findnew(switch_event->next_pid);
curr_thread[this_cpu] = sched_in;
@@ -1517,6 +1580,26 @@ process_sched_exit_event(struct event *event,
}
static void
+process_sched_migrate_task_event(struct raw_event_sample *raw,
+ struct event *event,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_migrate_task_event migrate_task_event;
+
+ FILL_COMMON_FIELDS(migrate_task_event, event, raw->data);
+
+ FILL_ARRAY(migrate_task_event, comm, event, raw->data);
+ FILL_FIELD(migrate_task_event, pid, event, raw->data);
+ FILL_FIELD(migrate_task_event, prio, event, raw->data);
+ FILL_FIELD(migrate_task_event, cpu, event, raw->data);
+
+ if (trace_handler->migrate_task_event)
+ trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
+}
+
+static void
process_raw_event(event_t *raw_event __used, void *more_data,
int cpu, u64 timestamp, struct thread *thread)
{
@@ -1539,23 +1622,24 @@ process_raw_event(event_t *raw_event __used, void *more_data,
process_sched_fork_event(raw, event, cpu, timestamp, thread);
if (!strcmp(event->name, "sched_process_exit"))
process_sched_exit_event(event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "sched_migrate_task"))
+ process_sched_migrate_task_event(raw, event, cpu, timestamp, thread);
}
static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
- char level;
- int show = 0;
- struct dso *dso = NULL;
struct thread *thread;
u64 ip = event->ip.ip;
u64 timestamp = -1;
u32 cpu = -1;
u64 period = 1;
void *more_data = event->ip.__more_data;
- int cpumode;
- thread = threads__findnew(event->ip.pid, &threads, &last_match);
+ if (!(sample_type & PERF_SAMPLE_RAW))
+ return 0;
+
+ thread = threads__findnew(event->ip.pid);
if (sample_type & PERF_SAMPLE_TIME) {
timestamp = *(u64 *)more_data;
@@ -1581,169 +1665,60 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
(void *)(long)ip,
(long long)period);
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
-
if (thread == NULL) {
- eprintf("problem processing %d event, skipping it.\n",
- event->header.type);
+ pr_debug("problem processing %d event, skipping it.\n",
+ event->header.type);
return -1;
}
- cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-
- if (cpumode == PERF_RECORD_MISC_KERNEL) {
- show = SHOW_KERNEL;
- level = 'k';
-
- dso = kernel_dso;
-
- dump_printf(" ...... dso: %s\n", dso->name);
-
- } else if (cpumode == PERF_RECORD_MISC_USER) {
-
- show = SHOW_USER;
- level = '.';
-
- } else {
- show = SHOW_HV;
- level = 'H';
-
- dso = hypervisor_dso;
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
- dump_printf(" ...... dso: [hypervisor]\n");
- }
+ if (profile_cpu != -1 && profile_cpu != (int) cpu)
+ return 0;
- if (sample_type & PERF_SAMPLE_RAW)
- process_raw_event(event, more_data, cpu, timestamp, thread);
+ process_raw_event(event, more_data, cpu, timestamp, thread);
return 0;
}
static int
-process_event(event_t *event, unsigned long offset, unsigned long head)
+process_lost_event(event_t *event __used,
+ unsigned long offset __used,
+ unsigned long head __used)
{
- trace_event(event);
+ nr_lost_chunks++;
+ nr_lost_events += event->lost.lost;
- nr_events++;
- switch (event->header.type) {
- case PERF_RECORD_MMAP:
- return 0;
- case PERF_RECORD_LOST:
- nr_lost_chunks++;
- nr_lost_events += event->lost.lost;
- return 0;
-
- case PERF_RECORD_COMM:
- return process_comm_event(event, offset, head);
-
- case PERF_RECORD_EXIT ... PERF_RECORD_READ:
- return 0;
+ return 0;
+}
- case PERF_RECORD_SAMPLE:
- return process_sample_event(event, offset, head);
+static int sample_type_check(u64 type)
+{
+ sample_type = type;
- case PERF_RECORD_MAX:
- default:
+ if (!(sample_type & PERF_SAMPLE_RAW)) {
+ fprintf(stderr,
+ "No trace sample to read. Did you call perf record "
+ "without -R?");
return -1;
}
return 0;
}
+static struct perf_file_handler file_handler = {
+ .process_sample_event = process_sample_event,
+ .process_comm_event = process_comm_event,
+ .process_lost_event = process_lost_event,
+ .sample_type_check = sample_type_check,
+};
+
static int read_events(void)
{
- int ret, rc = EXIT_FAILURE;
- unsigned long offset = 0;
- unsigned long head = 0;
- struct stat perf_stat;
- event_t *event;
- uint32_t size;
- char *buf;
-
- trace_report();
- register_idle_thread(&threads, &last_match);
-
- input = open(input_name, O_RDONLY);
- if (input < 0) {
- perror("failed to open file");
- exit(-1);
- }
-
- ret = fstat(input, &perf_stat);
- if (ret < 0) {
- perror("failed to stat file");
- exit(-1);
- }
-
- if (!perf_stat.st_size) {
- fprintf(stderr, "zero-sized file, nothing to do!\n");
- exit(0);
- }
- header = perf_header__read(input);
- head = header->data_offset;
- sample_type = perf_header__sample_type(header);
-
- if (!(sample_type & PERF_SAMPLE_RAW))
- die("No trace sample to read. Did you call perf record "
- "without -R?");
-
- if (load_kernel() < 0) {
- perror("failed to load kernel symbols");
- return EXIT_FAILURE;
- }
-
-remap:
- buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
- MAP_SHARED, input, offset);
- if (buf == MAP_FAILED) {
- perror("failed to mmap file");
- exit(-1);
- }
-
-more:
- event = (event_t *)(buf + head);
-
- size = event->header.size;
- if (!size)
- size = 8;
-
- if (head + event->header.size >= page_size * mmap_window) {
- unsigned long shift = page_size * (head / page_size);
- int res;
-
- res = munmap(buf, page_size * mmap_window);
- assert(res == 0);
-
- offset += shift;
- head -= shift;
- goto remap;
- }
-
- size = event->header.size;
-
-
- if (!size || process_event(event, offset, head) < 0) {
-
- /*
- * assume we lost track of the stream, check alignment, and
- * increment a single u64 in the hope to catch on again 'soon'.
- */
-
- if (unlikely(head & 7))
- head &= ~7ULL;
-
- size = 8;
- }
-
- head += size;
-
- if (offset + head < (unsigned long)perf_stat.st_size)
- goto more;
-
- rc = EXIT_SUCCESS;
- close(input);
+ register_idle_thread();
+ register_perf_file_handler(&file_handler);
- return rc;
+ return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);
}
static void print_bad_events(void)
@@ -1883,6 +1858,8 @@ static const struct option latency_options[] = {
"sort by key(s): runtime, switch, avg, max"),
OPT_BOOLEAN('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
+ OPT_INTEGER('C', "CPU", &profile_cpu,
+ "CPU to profile on"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_END()
@@ -1961,7 +1938,6 @@ static int __cmd_record(int argc, const char **argv)
int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
symbol__init();
- page_size = getpagesize();
argc = parse_options(argc, argv, sched_options, sched_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
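
A trivial standalone illustration of the new -C/--CPU filtering in perf sched above: samples from other CPUs are dropped before the raw trace event is parsed. The helper name is made up for the example.

/*
 * Sketch: -1 means "no CPU filter", otherwise only matching samples pass.
 */
#include <stdbool.h>
#include <stdio.h>

static bool sample_on_profiled_cpu(int profile_cpu, int sample_cpu)
{
	return profile_cpu == -1 || profile_cpu == sample_cpu;
}

int main(void)
{
	printf("%d\n", sample_on_profiled_cpu(-1, 3));	/* 1: no filter */
	printf("%d\n", sample_on_profiled_cpu(2, 3));	/* 0: filtered  */
	return 0;
}
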
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 3db31e7bf173..c6df3770b87e 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -50,15 +50,17 @@
static struct perf_event_attr default_attrs[] = {
- { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
- { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES},
- { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
- { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
-
- { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
- { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
- { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES},
- { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES },
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
+
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES },
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES },
};
@@ -125,6 +127,7 @@ struct stats event_res_stats[MAX_COUNTERS][3];
struct stats runtime_nsecs_stats;
struct stats walltime_nsecs_stats;
struct stats runtime_cycles_stats;
+struct stats runtime_branches_stats;
#define MATCH_EVENT(t, c, counter) \
(attrs[counter].type == PERF_TYPE_##t && \
@@ -235,6 +238,8 @@ static void read_counter(int counter)
update_stats(&runtime_nsecs_stats, count[0]);
if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
update_stats(&runtime_cycles_stats, count[0]);
+ if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
+ update_stats(&runtime_branches_stats, count[0]);
}
static int run_perf_stat(int argc __used, const char **argv)
@@ -352,6 +357,14 @@ static void abs_printout(int counter, double avg)
ratio = avg / total;
fprintf(stderr, " # %10.3f IPC ", ratio);
+ } else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter)) {
+ total = avg_stats(&runtime_branches_stats);
+
+ if (total)
+ ratio = avg * 100 / total;
+
+ fprintf(stderr, " # %10.3f %% ", ratio);
+
} else {
total = avg_stats(&runtime_nsecs_stats);
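
For the new branch-miss line in perf stat above, a tiny sketch of the derivation: misses are printed as a percentage of the branch-instruction count accumulated from the companion default event. The helper below is illustrative, not the perf code.

/*
 * Sketch: miss rate = misses * 100 / branch instructions, guarded
 * against a zero denominator.
 */
#include <stdio.h>

static double branch_miss_pct(double branch_misses, double branch_insns)
{
	if (branch_insns == 0.0)
		return 0.0;
	return branch_misses * 100.0 / branch_insns;
}

int main(void)
{
	/* e.g. 1.2M misses out of 80M branches -> 1.5% */
	printf("%.3f %%\n", branch_miss_pct(1.2e6, 80e6));
	return 0;
}
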
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 702d8fe58fbc..0a2f22261c3a 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -153,6 +153,17 @@ static struct wake_event *wake_events;
struct sample_wrapper *all_samples;
+
+struct process_filter;
+struct process_filter {
+ char *name;
+ int pid;
+ struct process_filter *next;
+};
+
+static struct process_filter *process_filter;
+
+
static struct per_pid *find_create_pid(int pid)
{
struct per_pid *cursor = all_data;
@@ -763,21 +774,42 @@ static void draw_wakeups(void)
c = p->all;
while (c) {
if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
- if (p->pid == we->waker) {
+ if (p->pid == we->waker && !from) {
from = c->Y;
- task_from = c->comm;
+ task_from = strdup(c->comm);
}
- if (p->pid == we->wakee) {
+ if (p->pid == we->wakee && !to) {
to = c->Y;
- task_to = c->comm;
+ task_to = strdup(c->comm);
}
}
c = c->next;
}
+ c = p->all;
+ while (c) {
+ if (p->pid == we->waker && !from) {
+ from = c->Y;
+ task_from = strdup(c->comm);
+ }
+ if (p->pid == we->wakee && !to) {
+ to = c->Y;
+ task_to = strdup(c->comm);
+ }
+ c = c->next;
+ }
}
p = p->next;
}
+ if (!task_from) {
+ task_from = malloc(40);
+ sprintf(task_from, "[%i]", we->waker);
+ }
+ if (!task_to) {
+ task_to = malloc(40);
+ sprintf(task_to, "[%i]", we->wakee);
+ }
+
if (we->waker == -1)
svg_interrupt(we->time, to);
else if (from && to && abs(from - to) == 1)
@@ -785,6 +817,9 @@ static void draw_wakeups(void)
else
svg_partial_wakeline(we->time, from, task_from, to, task_to);
we = we->next;
+
+ free(task_from);
+ free(task_to);
}
}
@@ -858,12 +893,89 @@ static void draw_process_bars(void)
}
}
+static void add_process_filter(const char *string)
+{
+ struct process_filter *filt;
+ int pid;
+
+ pid = strtoull(string, NULL, 10);
+ filt = malloc(sizeof(struct process_filter));
+ if (!filt)
+ return;
+
+ filt->name = strdup(string);
+ filt->pid = pid;
+ filt->next = process_filter;
+
+ process_filter = filt;
+}
+
+static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
+{
+ struct process_filter *filt;
+ if (!process_filter)
+ return 1;
+
+ filt = process_filter;
+ while (filt) {
+ if (filt->pid && p->pid == filt->pid)
+ return 1;
+ if (strcmp(filt->name, c->comm) == 0)
+ return 1;
+ filt = filt->next;
+ }
+ return 0;
+}
+
+static int determine_display_tasks_filtered(void)
+{
+ struct per_pid *p;
+ struct per_pidcomm *c;
+ int count = 0;
+
+ p = all_data;
+ while (p) {
+ p->display = 0;
+ if (p->start_time == 1)
+ p->start_time = first_time;
+
+ /* no exit marker, task kept running to the end */
+ if (p->end_time == 0)
+ p->end_time = last_time;
+
+ c = p->all;
+
+ while (c) {
+ c->display = 0;
+
+ if (c->start_time == 1)
+ c->start_time = first_time;
+
+ if (passes_filter(p, c)) {
+ c->display = 1;
+ p->display = 1;
+ count++;
+ }
+
+ if (c->end_time == 0)
+ c->end_time = last_time;
+
+ c = c->next;
+ }
+ p = p->next;
+ }
+ return count;
+}
+
static int determine_display_tasks(u64 threshold)
{
struct per_pid *p;
struct per_pidcomm *c;
int count = 0;
+ if (process_filter)
+ return determine_display_tasks_filtered();
+
p = all_data;
while (p) {
p->display = 0;
@@ -1050,12 +1162,10 @@ more:
size = event->header.size;
if (!size || process_event(event) < 0) {
-
- printf("%p [%p]: skipping unknown header type: %d\n",
- (void *)(offset + head),
- (void *)(long)(event->header.size),
- event->header.type);
-
+ pr_warning("%p [%p]: skipping unknown header type: %d\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->header.type);
/*
* assume we lost track of the stream, check alignment, and
* increment a single u64 in the hope to catch on again 'soon'.
@@ -1088,7 +1198,8 @@ done:
write_svg_file(output_name);
- printf("Written %2.1f seconds of trace to %s.\n", (last_time - first_time) / 1000000000.0, output_name);
+ pr_info("Written %2.1f seconds of trace to %s.\n",
+ (last_time - first_time) / 1000000000.0, output_name);
return rc;
}
@@ -1129,6 +1240,14 @@ static int __cmd_record(int argc, const char **argv)
return cmd_record(i, rec_argv, NULL);
}
+static int
+parse_process(const struct option *opt __used, const char *arg, int __used unset)
+{
+ if (arg)
+ add_process_filter(arg);
+ return 0;
+}
+
static const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
@@ -1136,8 +1255,11 @@ static const struct option options[] = {
"output file name"),
OPT_INTEGER('w', "width", &svg_page_width,
"page width"),
- OPT_BOOLEAN('p', "power-only", &power_only,
+ OPT_BOOLEAN('P', "power-only", &power_only,
"output power data only"),
+ OPT_CALLBACK('p', "process", NULL, "process",
+ "process selector. Pass a pid or process name.",
+ parse_process),
OPT_END()
};
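
With this hunk, timechart's -p becomes a process selector (repeatable; a pid or a comm string) and the old power-only switch moves to -P. passes_filter() accepts a task when any filter entry matches either the pid numerically or the comm literally; a compact stand-alone illustration of that rule (names assumed, not from the patch):

#include <stdlib.h>
#include <string.h>

/* returns non-zero when one filter string selects the given task */
static int filter_matches(const char *filter, int task_pid, const char *comm)
{
	int pid = strtoul(filter, NULL, 10);	/* 0 when the filter is not numeric */

	return (pid && pid == task_pid) || strcmp(filter, comm) == 0;
}

e.g. 'perf timechart -p 2167 -p firefox' would keep both tasks in the generated SVG.
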
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 37512e936235..4a9fe228be2a 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -22,6 +22,7 @@
#include "util/symbol.h"
#include "util/color.h"
+#include "util/thread.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
@@ -54,26 +55,26 @@
static int fd[MAX_NR_CPUS][MAX_COUNTERS];
-static int system_wide = 0;
+static int system_wide = 0;
-static int default_interval = 100000;
+static int default_interval = 0;
-static int count_filter = 5;
-static int print_entries = 15;
+static int count_filter = 5;
+static int print_entries = 15;
-static int target_pid = -1;
-static int inherit = 0;
-static int profile_cpu = -1;
-static int nr_cpus = 0;
-static unsigned int realtime_prio = 0;
-static int group = 0;
+static int target_pid = -1;
+static int inherit = 0;
+static int profile_cpu = -1;
+static int nr_cpus = 0;
+static unsigned int realtime_prio = 0;
+static int group = 0;
static unsigned int page_size;
-static unsigned int mmap_pages = 16;
-static int freq = 0;
+static unsigned int mmap_pages = 16;
+static int freq = 1000; /* 1 KHz */
-static int delay_secs = 2;
-static int zero;
-static int dump_symtab;
+static int delay_secs = 2;
+static int zero = 0;
+static int dump_symtab = 0;
/*
* Source
@@ -86,19 +87,16 @@ struct source_line {
struct source_line *next;
};
-static char *sym_filter = NULL;
-struct sym_entry *sym_filter_entry = NULL;
-static int sym_pcnt_filter = 5;
-static int sym_counter = 0;
-static int display_weighted = -1;
+static char *sym_filter = NULL;
+struct sym_entry *sym_filter_entry = NULL;
+static int sym_pcnt_filter = 5;
+static int sym_counter = 0;
+static int display_weighted = -1;
/*
* Symbols
*/
-static u64 min_ip;
-static u64 max_ip = -1ll;
-
struct sym_entry {
struct rb_node rb_node;
struct list_head node;
@@ -106,6 +104,7 @@ struct sym_entry {
unsigned long snap_count;
double weight;
int skip;
+ struct map *map;
struct source_line *source;
struct source_line *lines;
struct source_line **lines_tail;
@@ -119,12 +118,11 @@ struct sym_entry {
static void parse_source(struct sym_entry *syme)
{
struct symbol *sym;
- struct module *module;
- struct section *section = NULL;
+ struct map *map;
FILE *file;
char command[PATH_MAX*2];
- const char *path = vmlinux_name;
- u64 start, end, len;
+ const char *path;
+ u64 len;
if (!syme)
return;
@@ -135,27 +133,16 @@ static void parse_source(struct sym_entry *syme)
}
sym = (struct symbol *)(syme + 1);
- module = sym->module;
-
- if (module)
- path = module->path;
- if (!path)
- return;
-
- start = sym->obj_start;
- if (!start)
- start = sym->start;
+ map = syme->map;
+ path = map->dso->long_name;
- if (module) {
- section = module->sections->find_section(module->sections, ".text");
- if (section)
- start -= section->vma;
- }
-
- end = start + sym->end - sym->start + 1;
len = sym->end - sym->start;
- sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", start, end, path);
+ sprintf(command,
+ "objdump --start-address=0x%016Lx "
+ "--stop-address=0x%016Lx -dS %s",
+ map->unmap_ip(map, sym->start),
+ map->unmap_ip(map, sym->end), path);
file = popen(command, "r");
if (!file)
@@ -187,13 +174,11 @@ static void parse_source(struct sym_entry *syme)
if (strlen(src->line)>8 && src->line[8] == ':') {
src->eip = strtoull(src->line, NULL, 16);
- if (section)
- src->eip += section->vma;
+ src->eip = map->unmap_ip(map, src->eip);
}
if (strlen(src->line)>8 && src->line[16] == ':') {
src->eip = strtoull(src->line, NULL, 16);
- if (section)
- src->eip += section->vma;
+ src->eip = map->unmap_ip(map, src->eip);
}
}
pclose(file);
@@ -245,16 +230,9 @@ static void lookup_sym_source(struct sym_entry *syme)
struct symbol *symbol = (struct symbol *)(syme + 1);
struct source_line *line;
char pattern[PATH_MAX];
- char *idx;
sprintf(pattern, "<%s>:", symbol->name);
- if (symbol->module) {
- idx = strstr(pattern, "\t");
- if (idx)
- *idx = 0;
- }
-
pthread_mutex_lock(&syme->source_lock);
for (line = syme->lines; line; line = line->next) {
if (strstr(line->line, pattern)) {
@@ -516,8 +494,8 @@ static void print_sym_table(void)
if (verbose)
printf(" - %016llx", sym->start);
printf(" : %s", sym->name);
- if (sym->module)
- printf("\t[%s]", sym->module->name);
+ if (syme->map->dso->name[0] == '[')
+ printf(" \t%s", syme->map->dso->name);
printf("\n");
}
}
@@ -686,6 +664,8 @@ static void handle_keypress(int c)
switch (c) {
case 'd':
prompt_integer(&delay_secs, "Enter display delay");
+ if (delay_secs < 1)
+ delay_secs = 1;
break;
case 'e':
prompt_integer(&print_entries, "Enter display entries (lines)");
@@ -788,7 +768,7 @@ static const char *skip_symbols[] = {
NULL
};
-static int symbol_filter(struct dso *self, struct symbol *sym)
+static int symbol_filter(struct map *map, struct symbol *sym)
{
struct sym_entry *syme;
const char *name = sym->name;
@@ -810,7 +790,8 @@ static int symbol_filter(struct dso *self, struct symbol *sym)
strstr(name, "_text_end"))
return 1;
- syme = dso__sym_priv(self, sym);
+ syme = dso__sym_priv(map->dso, sym);
+ syme->map = map;
pthread_mutex_init(&syme->source_lock, NULL);
if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter))
sym_filter_entry = syme;
@@ -827,34 +808,14 @@ static int symbol_filter(struct dso *self, struct symbol *sym)
static int parse_symbols(void)
{
- struct rb_node *node;
- struct symbol *sym;
- int use_modules = vmlinux_name ? 1 : 0;
-
- kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry));
- if (kernel_dso == NULL)
+ if (dsos__load_kernel(vmlinux_name, sizeof(struct sym_entry),
+ symbol_filter, 1) <= 0)
return -1;
- if (dso__load_kernel(kernel_dso, vmlinux_name, symbol_filter, verbose, use_modules) <= 0)
- goto out_delete_dso;
-
- node = rb_first(&kernel_dso->syms);
- sym = rb_entry(node, struct symbol, rb_node);
- min_ip = sym->start;
-
- node = rb_last(&kernel_dso->syms);
- sym = rb_entry(node, struct symbol, rb_node);
- max_ip = sym->end;
-
if (dump_symtab)
- dso__fprintf(kernel_dso, stderr);
+ dsos__fprintf(stderr);
return 0;
-
-out_delete_dso:
- dso__delete(kernel_dso);
- kernel_dso = NULL;
- return -1;
}
/*
@@ -862,10 +823,11 @@ out_delete_dso:
*/
static void record_ip(u64 ip, int counter)
{
- struct symbol *sym = dso__find_symbol(kernel_dso, ip);
+ struct map *map;
+ struct symbol *sym = kernel_maps__find_symbol(ip, &map);
if (sym != NULL) {
- struct sym_entry *syme = dso__sym_priv(kernel_dso, sym);
+ struct sym_entry *syme = dso__sym_priv(map->dso, sym);
if (!syme->skip) {
syme->count[counter]++;
@@ -911,8 +873,6 @@ static unsigned int mmap_read_head(struct mmap_data *md)
return head;
}
-struct timeval last_read, this_read;
-
static void mmap_read_counter(struct mmap_data *md)
{
unsigned int head = mmap_read_head(md);
@@ -920,8 +880,6 @@ static void mmap_read_counter(struct mmap_data *md)
unsigned char *data = md->base + page_size;
int diff;
- gettimeofday(&this_read, NULL);
-
/*
* If we're further behind than half the buffer, there's a chance
* the writer will bite our tail and mess up the samples under us.
@@ -932,14 +890,7 @@ static void mmap_read_counter(struct mmap_data *md)
*/
diff = head - old;
if (diff > md->mask / 2 || diff < 0) {
- struct timeval iv;
- unsigned long msecs;
-
- timersub(&this_read, &last_read, &iv);
- msecs = iv.tv_sec*1000 + iv.tv_usec/1000;
-
- fprintf(stderr, "WARNING: failed to keep up with mmap data."
- " Last read %lu msecs ago.\n", msecs);
+ fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
/*
* head points to a known good entry, start there.
@@ -947,8 +898,6 @@ static void mmap_read_counter(struct mmap_data *md)
old = head;
}
- last_read = this_read;
-
for (; old != head;) {
event_t *event = (event_t *)&data[old & md->mask];
@@ -1016,7 +965,13 @@ static void start_counter(int i, int counter)
attr = attrs + counter;
attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
- attr->freq = freq;
+
+ if (freq) {
+ attr->sample_type |= PERF_SAMPLE_PERIOD;
+ attr->freq = 1;
+ attr->sample_freq = freq;
+ }
+
attr->inherit = (cpu < 0) && inherit;
try_again:
@@ -1171,11 +1126,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
if (argc)
usage_with_options(top_usage, options);
- if (freq) {
- default_interval = freq;
- freq = 1;
- }
-
/* CPU and PID are mutually exclusive */
if (target_pid != -1 && profile_cpu != -1) {
printf("WARNING: PID switch overriding CPU\n");
@@ -1192,6 +1142,19 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
parse_symbols();
parse_source(sym_filter_entry);
+
+ /*
+ * User specified count overrides default frequency.
+ */
+ if (default_interval)
+ freq = 0;
+ else if (freq) {
+ default_interval = freq;
+ } else {
+ fprintf(stderr, "frequency and count are zero, aborting\n");
+ exit(EXIT_FAILURE);
+ }
+
/*
* Fill in the ones not specifically initialized via -c:
*/
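
The net effect of the builtin-top.c changes: sampling defaults to a 1000 Hz frequency, a user-supplied -c count switches back to period-based sampling, and start_counter() fills the attribute accordingly. A hedged sketch of the resulting attribute setup (standalone helper, not the patch itself):

#include <string.h>
#include <linux/perf_event.h>

/* sketch: how the sampling attribute is expected to end up */
static void setup_sampling(struct perf_event_attr *attr, int freq, __u64 count)
{
	memset(attr, 0, sizeof(*attr));
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	if (freq) {				/* default: freq = 1000 */
		attr->sample_type |= PERF_SAMPLE_PERIOD;
		attr->freq        = 1;
		attr->sample_freq = freq;
	} else {
		attr->sample_period = count;	/* user passed -c <count> */
	}
}
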
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 0c5e4f72f2ba..e566bbe3f22d 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -12,28 +12,24 @@
#include "util/debug.h"
#include "util/trace-event.h"
+#include "util/data_map.h"
static char const *input_name = "perf.data";
-static int input;
-static unsigned long page_size;
-static unsigned long mmap_window = 32;
static unsigned long total = 0;
static unsigned long total_comm = 0;
-static struct rb_root threads;
-static struct thread *last_match;
-
static struct perf_header *header;
static u64 sample_type;
+static char *cwd;
+static int cwdlen;
+
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread;
-
- thread = threads__findnew(event->comm.pid, &threads, &last_match);
+ struct thread *thread = threads__findnew(event->comm.pid);
dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
(void *)(offset + head),
@@ -53,18 +49,12 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
- char level;
- int show = 0;
- struct dso *dso = NULL;
- struct thread *thread;
u64 ip = event->ip.ip;
u64 timestamp = -1;
u32 cpu = -1;
u64 period = 1;
void *more_data = event->ip.__more_data;
- int cpumode;
-
- thread = threads__findnew(event->ip.pid, &threads, &last_match);
+ struct thread *thread = threads__findnew(event->ip.pid);
if (sample_type & PERF_SAMPLE_TIME) {
timestamp = *(u64 *)more_data;
@@ -90,37 +80,13 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
(void *)(long)ip,
(long long)period);
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
-
if (thread == NULL) {
- eprintf("problem processing %d event, skipping it.\n",
- event->header.type);
+ pr_debug("problem processing %d event, skipping it.\n",
+ event->header.type);
return -1;
}
- cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-
- if (cpumode == PERF_RECORD_MISC_KERNEL) {
- show = SHOW_KERNEL;
- level = 'k';
-
- dso = kernel_dso;
-
- dump_printf(" ...... dso: %s\n", dso->name);
-
- } else if (cpumode == PERF_RECORD_MISC_USER) {
-
- show = SHOW_USER;
- level = '.';
-
- } else {
- show = SHOW_HV;
- level = 'H';
-
- dso = hypervisor_dso;
-
- dump_printf(" ...... dso: [hypervisor]\n");
- }
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
if (sample_type & PERF_SAMPLE_RAW) {
struct {
@@ -140,121 +106,32 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
return 0;
}
-static int
-process_event(event_t *event, unsigned long offset, unsigned long head)
+static int sample_type_check(u64 type)
{
- trace_event(event);
-
- switch (event->header.type) {
- case PERF_RECORD_MMAP ... PERF_RECORD_LOST:
- return 0;
-
- case PERF_RECORD_COMM:
- return process_comm_event(event, offset, head);
+ sample_type = type;
- case PERF_RECORD_EXIT ... PERF_RECORD_READ:
- return 0;
-
- case PERF_RECORD_SAMPLE:
- return process_sample_event(event, offset, head);
-
- case PERF_RECORD_MAX:
- default:
+ if (!(sample_type & PERF_SAMPLE_RAW)) {
+ fprintf(stderr,
+ "No trace sample to read. Did you call perf record "
+ "without -R?");
return -1;
}
return 0;
}
+static struct perf_file_handler file_handler = {
+ .process_sample_event = process_sample_event,
+ .process_comm_event = process_comm_event,
+ .sample_type_check = sample_type_check,
+};
+
static int __cmd_trace(void)
{
- int ret, rc = EXIT_FAILURE;
- unsigned long offset = 0;
- unsigned long head = 0;
- struct stat perf_stat;
- event_t *event;
- uint32_t size;
- char *buf;
-
- trace_report();
- register_idle_thread(&threads, &last_match);
-
- input = open(input_name, O_RDONLY);
- if (input < 0) {
- perror("failed to open file");
- exit(-1);
- }
-
- ret = fstat(input, &perf_stat);
- if (ret < 0) {
- perror("failed to stat file");
- exit(-1);
- }
-
- if (!perf_stat.st_size) {
- fprintf(stderr, "zero-sized file, nothing to do!\n");
- exit(0);
- }
- header = perf_header__read(input);
- head = header->data_offset;
- sample_type = perf_header__sample_type(header);
-
- if (!(sample_type & PERF_SAMPLE_RAW))
- die("No trace sample to read. Did you call perf record "
- "without -R?");
-
- if (load_kernel() < 0) {
- perror("failed to load kernel symbols");
- return EXIT_FAILURE;
- }
-
-remap:
- buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
- MAP_SHARED, input, offset);
- if (buf == MAP_FAILED) {
- perror("failed to mmap file");
- exit(-1);
- }
-
-more:
- event = (event_t *)(buf + head);
-
- if (head + event->header.size >= page_size * mmap_window) {
- unsigned long shift = page_size * (head / page_size);
- int res;
-
- res = munmap(buf, page_size * mmap_window);
- assert(res == 0);
-
- offset += shift;
- head -= shift;
- goto remap;
- }
-
- size = event->header.size;
-
- if (!size || process_event(event, offset, head) < 0) {
-
- /*
- * assume we lost track of the stream, check alignment, and
- * increment a single u64 in the hope to catch on again 'soon'.
- */
-
- if (unlikely(head & 7))
- head &= ~7ULL;
-
- size = 8;
- }
-
- head += size;
-
- if (offset + head < (unsigned long)perf_stat.st_size)
- goto more;
-
- rc = EXIT_SUCCESS;
- close(input);
+ register_idle_thread();
+ register_perf_file_handler(&file_handler);
- return rc;
+ return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);
}
static const char * const annotate_usage[] = {
@@ -267,13 +144,14 @@ static const struct option options[] = {
"dump raw trace in ASCII"),
OPT_BOOLEAN('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('l', "latency", &latency_format,
+ "show latency attributes (irqs/preemption disabled, etc)"),
OPT_END()
};
int cmd_trace(int argc, const char **argv, const char *prefix __used)
{
symbol__init();
- page_size = getpagesize();
argc = parse_options(argc, argv, options, annotate_usage, 0);
if (argc) {
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index c570d177d5c6..9cafe5463266 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -89,8 +89,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
/*
* Check remaining flags.
*/
- if (!prefixcmp(cmd, "--exec-path")) {
- cmd += 11;
+ if (!prefixcmp(cmd, CMD_EXEC_PATH)) {
+ cmd += strlen(CMD_EXEC_PATH);
if (*cmd == '=')
perf_set_argv_exec_path(cmd + 1);
else {
@@ -117,8 +117,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
(*argv)++;
(*argc)--;
handled++;
- } else if (!prefixcmp(cmd, "--perf-dir=")) {
- setenv(PERF_DIR_ENVIRONMENT, cmd + 10, 1);
+ } else if (!prefixcmp(cmd, CMD_PERF_DIR)) {
+ setenv(PERF_DIR_ENVIRONMENT, cmd + strlen(CMD_PERF_DIR), 1);
if (envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--work-tree")) {
@@ -131,8 +131,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
- } else if (!prefixcmp(cmd, "--work-tree=")) {
- setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + 12, 1);
+ } else if (!prefixcmp(cmd, CMD_WORK_TREE)) {
+ setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + strlen(CMD_WORK_TREE), 1);
if (envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--debugfs-dir")) {
@@ -146,8 +146,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
- } else if (!prefixcmp(cmd, "--debugfs-dir=")) {
- strncpy(debugfs_mntpt, cmd + 14, MAXPATHLEN);
+ } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
+ strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN);
debugfs_mntpt[MAXPATHLEN - 1] = '\0';
if (envchanged)
*envchanged = 1;
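
Replacing the hard-coded offsets (cmd + 11, cmd + 10, ...) with strlen() over the CMD_* macros from cache.h keeps the matched prefix and the skipped length in sync by construction. A small illustration, assuming the usual prefixcmp() contract (returns 0 when the prefix matches):

#include <string.h>

#define CMD_PERF_DIR "--perf-dir="

/* assumed helper with the same contract as perf's prefixcmp() */
static int prefixcmp(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix));
}

/* the option value starts right after the macro text, whatever its length */
static const char *perf_dir_value(const char *cmd)
{
	return prefixcmp(cmd, CMD_PERF_DIR) ? NULL : cmd + strlen(CMD_PERF_DIR);
}
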
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index c561d1538c03..54552a00a117 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -1,7 +1,7 @@
#!/bin/sh
GVF=PERF-VERSION-FILE
-DEF_VER=v0.0.1.PERF
+DEF_VER=v0.0.2.PERF
LF='
'
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 6f8ea9d210b6..918eb376abe3 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -1,10 +1,15 @@
-#ifndef CACHE_H
-#define CACHE_H
+#ifndef __PERF_CACHE_H
+#define __PERF_CACHE_H
#include "util.h"
#include "strbuf.h"
#include "../perf.h"
+#define CMD_EXEC_PATH "--exec-path"
+#define CMD_PERF_DIR "--perf-dir="
+#define CMD_WORK_TREE "--work-tree="
+#define CMD_DEBUGFS_DIR "--debugfs-dir="
+
#define PERF_DIR_ENVIRONMENT "PERF_DIR"
#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE"
#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf"
@@ -117,4 +122,4 @@ extern char *perf_pathdup(const char *fmt, ...)
extern size_t strlcpy(char *dest, const char *src, size_t size);
-#endif /* CACHE_H */
+#endif /* __PERF_CACHE_H */
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 3b8380f1b478..b3b71258272a 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -206,7 +206,7 @@ fill_node(struct callchain_node *node, struct ip_callchain *chain,
}
node->val_nr = chain->nr - start;
if (!node->val_nr)
- printf("Warning: empty node in callchain tree\n");
+ pr_warning("Warning: empty node in callchain tree\n");
}
static void
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 43cf3ea9e088..ad4626de4c2b 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -58,4 +58,4 @@ static inline u64 cumul_hits(struct callchain_node *node)
int register_callchain_param(struct callchain_param *param);
void append_chain(struct callchain_node *root, struct ip_callchain *chain,
struct symbol **syms);
-#endif
+#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 58d597564b99..24e8809210bb 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -1,5 +1,5 @@
-#ifndef COLOR_H
-#define COLOR_H
+#ifndef __PERF_COLOR_H
+#define __PERF_COLOR_H
/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */
#define COLOR_MAXLEN 24
@@ -39,4 +39,4 @@ int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *bu
int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
const char *get_percent_color(double percent);
-#endif /* COLOR_H */
+#endif /* __PERF_COLOR_H */
diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c
new file mode 100644
index 000000000000..18accb8fee4d
--- /dev/null
+++ b/tools/perf/util/data_map.c
@@ -0,0 +1,222 @@
+#include "data_map.h"
+#include "symbol.h"
+#include "util.h"
+#include "debug.h"
+
+
+static struct perf_file_handler *curr_handler;
+static unsigned long mmap_window = 32;
+static char __cwd[PATH_MAX];
+
+static int
+process_event_stub(event_t *event __used,
+ unsigned long offset __used,
+ unsigned long head __used)
+{
+ return 0;
+}
+
+void register_perf_file_handler(struct perf_file_handler *handler)
+{
+ if (!handler->process_sample_event)
+ handler->process_sample_event = process_event_stub;
+ if (!handler->process_mmap_event)
+ handler->process_mmap_event = process_event_stub;
+ if (!handler->process_comm_event)
+ handler->process_comm_event = process_event_stub;
+ if (!handler->process_fork_event)
+ handler->process_fork_event = process_event_stub;
+ if (!handler->process_exit_event)
+ handler->process_exit_event = process_event_stub;
+ if (!handler->process_lost_event)
+ handler->process_lost_event = process_event_stub;
+ if (!handler->process_read_event)
+ handler->process_read_event = process_event_stub;
+ if (!handler->process_throttle_event)
+ handler->process_throttle_event = process_event_stub;
+ if (!handler->process_unthrottle_event)
+ handler->process_unthrottle_event = process_event_stub;
+
+ curr_handler = handler;
+}
+
+static int
+process_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ trace_event(event);
+
+ switch (event->header.type) {
+ case PERF_RECORD_SAMPLE:
+ return curr_handler->process_sample_event(event, offset, head);
+ case PERF_RECORD_MMAP:
+ return curr_handler->process_mmap_event(event, offset, head);
+ case PERF_RECORD_COMM:
+ return curr_handler->process_comm_event(event, offset, head);
+ case PERF_RECORD_FORK:
+ return curr_handler->process_fork_event(event, offset, head);
+ case PERF_RECORD_EXIT:
+ return curr_handler->process_exit_event(event, offset, head);
+ case PERF_RECORD_LOST:
+ return curr_handler->process_lost_event(event, offset, head);
+ case PERF_RECORD_READ:
+ return curr_handler->process_read_event(event, offset, head);
+ case PERF_RECORD_THROTTLE:
+ return curr_handler->process_throttle_event(event, offset, head);
+ case PERF_RECORD_UNTHROTTLE:
+ return curr_handler->process_unthrottle_event(event, offset, head);
+ default:
+ curr_handler->total_unknown++;
+ return -1;
+ }
+}
+
+int mmap_dispatch_perf_file(struct perf_header **pheader,
+ const char *input_name,
+ int force,
+ int full_paths,
+ int *cwdlen,
+ char **cwd)
+{
+ int ret, rc = EXIT_FAILURE;
+ struct perf_header *header;
+ unsigned long head, shift;
+ unsigned long offset = 0;
+ struct stat input_stat;
+ size_t page_size;
+ u64 sample_type;
+ event_t *event;
+ uint32_t size;
+ int input;
+ char *buf;
+
+ if (!curr_handler)
+ die("Forgot to register perf file handler");
+
+ page_size = getpagesize();
+
+ input = open(input_name, O_RDONLY);
+ if (input < 0) {
+ fprintf(stderr, " failed to open file: %s", input_name);
+ if (!strcmp(input_name, "perf.data"))
+ fprintf(stderr, " (try 'perf record' first)");
+ fprintf(stderr, "\n");
+ exit(-1);
+ }
+
+ ret = fstat(input, &input_stat);
+ if (ret < 0) {
+ perror("failed to stat file");
+ exit(-1);
+ }
+
+ if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
+ fprintf(stderr, "file: %s not owned by current user or root\n",
+ input_name);
+ exit(-1);
+ }
+
+ if (!input_stat.st_size) {
+ fprintf(stderr, "zero-sized file, nothing to do!\n");
+ exit(0);
+ }
+
+ *pheader = perf_header__read(input);
+ header = *pheader;
+ head = header->data_offset;
+
+ sample_type = perf_header__sample_type(header);
+
+ if (curr_handler->sample_type_check)
+ if (curr_handler->sample_type_check(sample_type) < 0)
+ exit(-1);
+
+ if (load_kernel(0, NULL) < 0) {
+ perror("failed to load kernel symbols");
+ return EXIT_FAILURE;
+ }
+
+ if (!full_paths) {
+ if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
+ perror("failed to get the current directory");
+ return EXIT_FAILURE;
+ }
+ *cwd = __cwd;
+ *cwdlen = strlen(*cwd);
+ } else {
+ *cwd = NULL;
+ *cwdlen = 0;
+ }
+
+ shift = page_size * (head / page_size);
+ offset += shift;
+ head -= shift;
+
+remap:
+ buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
+ MAP_SHARED, input, offset);
+ if (buf == MAP_FAILED) {
+ perror("failed to mmap file");
+ exit(-1);
+ }
+
+more:
+ event = (event_t *)(buf + head);
+
+ size = event->header.size;
+ if (!size)
+ size = 8;
+
+ if (head + event->header.size >= page_size * mmap_window) {
+ int munmap_ret;
+
+ shift = page_size * (head / page_size);
+
+ munmap_ret = munmap(buf, page_size * mmap_window);
+ assert(munmap_ret == 0);
+
+ offset += shift;
+ head -= shift;
+ goto remap;
+ }
+
+ size = event->header.size;
+
+ dump_printf("\n%p [%p]: event: %d\n",
+ (void *)(offset + head),
+ (void *)(long)event->header.size,
+ event->header.type);
+
+ if (!size || process_event(event, offset, head) < 0) {
+
+ dump_printf("%p [%p]: skipping unknown header type: %d\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->header.type);
+
+ /*
+ * assume we lost track of the stream, check alignment, and
+ * increment a single u64 in the hope to catch on again 'soon'.
+ */
+
+ if (unlikely(head & 7))
+ head &= ~7ULL;
+
+ size = 8;
+ }
+
+ head += size;
+
+ if (offset + head >= header->data_offset + header->data_size)
+ goto done;
+
+ if (offset + head < (unsigned long)input_stat.st_size)
+ goto more;
+
+done:
+ rc = EXIT_SUCCESS;
+ close(input);
+
+ return rc;
+}
+
+
diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h
new file mode 100644
index 000000000000..716d1053b074
--- /dev/null
+++ b/tools/perf/util/data_map.h
@@ -0,0 +1,31 @@
+#ifndef __PERF_DATAMAP_H
+#define __PERF_DATAMAP_H
+
+#include "event.h"
+#include "header.h"
+
+typedef int (*event_type_handler_t)(event_t *, unsigned long, unsigned long);
+
+struct perf_file_handler {
+ event_type_handler_t process_sample_event;
+ event_type_handler_t process_mmap_event;
+ event_type_handler_t process_comm_event;
+ event_type_handler_t process_fork_event;
+ event_type_handler_t process_exit_event;
+ event_type_handler_t process_lost_event;
+ event_type_handler_t process_read_event;
+ event_type_handler_t process_throttle_event;
+ event_type_handler_t process_unthrottle_event;
+ int (*sample_type_check)(u64 sample_type);
+ unsigned long total_unknown;
+};
+
+void register_perf_file_handler(struct perf_file_handler *handler);
+int mmap_dispatch_perf_file(struct perf_header **pheader,
+ const char *input_name,
+ int force,
+ int full_paths,
+ int *cwdlen,
+ char **cwd);
+
+#endif
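
Taken together, data_map.[ch] let a builtin declare only the callbacks it cares about and delegate the open/stat/mmap/dispatch boilerplate. A minimal hypothetical consumer (mirroring what builtin-trace.c does above) would look roughly like:

#include "data_map.h"

static int handle_sample(event_t *event __used, unsigned long offset __used,
			 unsigned long head __used)
{
	return 0;	/* consume the sample */
}

static struct perf_file_handler handler = {
	.process_sample_event	= handle_sample,
	/* every unset callback is replaced by a stub on registration */
};

static int run(const char *input_name)
{
	struct perf_header *header;
	char *cwd;
	int cwdlen;

	register_perf_file_handler(&handler);
	return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);
}
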
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index e8ca98fe0bd4..28d520d5a1fb 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -13,12 +13,12 @@
int verbose = 0;
int dump_trace = 0;
-int eprintf(const char *fmt, ...)
+int eprintf(int level, const char *fmt, ...)
{
va_list args;
int ret = 0;
- if (verbose) {
+ if (verbose >= level) {
va_start(args, fmt);
ret = vfprintf(stderr, fmt, args);
va_end(args);
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 437eea58ce40..e8b18a1f87a4 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -1,8 +1,13 @@
/* For debugging general purposes */
+#ifndef __PERF_DEBUG_H
+#define __PERF_DEBUG_H
extern int verbose;
extern int dump_trace;
-int eprintf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+int eprintf(int level,
+ const char *fmt, ...) __attribute__((format(printf, 2, 3)));
int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
void trace_event(event_t *event);
+
+#endif /* __PERF_DEBUG_H */
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 2c9c26d6ded0..d972b4b0d38c 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -1,14 +1,10 @@
#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H
+
#include "../perf.h"
#include "util.h"
#include <linux/list.h>
-
-enum {
- SHOW_KERNEL = 1,
- SHOW_USER = 2,
- SHOW_HV = 4,
-};
+#include <linux/rbtree.h>
/*
* PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
@@ -78,11 +74,15 @@ typedef union event_union {
} event_t;
struct map {
- struct list_head node;
+ union {
+ struct rb_node rb_node;
+ struct list_head node;
+ };
u64 start;
u64 end;
u64 pgoff;
u64 (*map_ip)(struct map *, u64);
+ u64 (*unmap_ip)(struct map *, u64);
struct dso *dso;
};
@@ -91,14 +91,24 @@ static inline u64 map__map_ip(struct map *map, u64 ip)
return ip - map->start + map->pgoff;
}
-static inline u64 vdso__map_ip(struct map *map __used, u64 ip)
+static inline u64 map__unmap_ip(struct map *map, u64 ip)
+{
+ return ip + map->start - map->pgoff;
+}
+
+static inline u64 identity__map_ip(struct map *map __used, u64 ip)
{
return ip;
}
-struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen);
+struct symbol;
+
+typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
+
+struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen,
+ unsigned int sym_priv_size, symbol_filter_t filter);
struct map *map__clone(struct map *self);
int map__overlap(struct map *l, struct map *r);
size_t map__fprintf(struct map *self, FILE *fp);
-#endif
+#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/exec_cmd.h b/tools/perf/util/exec_cmd.h
index effe25eb1545..31647ac92ed1 100644
--- a/tools/perf/util/exec_cmd.h
+++ b/tools/perf/util/exec_cmd.h
@@ -1,5 +1,5 @@
-#ifndef PERF_EXEC_CMD_H
-#define PERF_EXEC_CMD_H
+#ifndef __PERF_EXEC_CMD_H
+#define __PERF_EXEC_CMD_H
extern void perf_set_argv_exec_path(const char *exec_path);
extern const char *perf_extract_argv0_path(const char *path);
@@ -10,4 +10,4 @@ extern int execv_perf_cmd(const char **argv); /* NULL terminated */
extern int execl_perf_cmd(const char *cmd, ...);
extern const char *system_path(const char *path);
-#endif /* PERF_EXEC_CMD_H */
+#endif /* __PERF_EXEC_CMD_H */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index e306857b2c2b..7d26659b806c 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -5,6 +5,8 @@
#include "util.h"
#include "header.h"
+#include "../perf.h"
+#include "trace-event.h"
/*
* Create new perf.data header attribute:
@@ -46,23 +48,17 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
*/
struct perf_header *perf_header__new(void)
{
- struct perf_header *self = malloc(sizeof(*self));
+ struct perf_header *self = calloc(1, sizeof(*self));
if (!self)
die("nomem");
- self->frozen = 0;
-
- self->attrs = 0;
self->size = 1;
self->attr = malloc(sizeof(void *));
if (!self->attr)
die("nomem");
- self->data_offset = 0;
- self->data_size = 0;
-
return self;
}
@@ -97,7 +93,7 @@ static struct perf_trace_event_type *events;
void perf_header__push_event(u64 id, const char *name)
{
if (strlen(name) > MAX_EVENT_NAME)
- printf("Event %s will be truncated\n", name);
+ pr_warning("Event %s will be truncated\n", name);
if (!events) {
events = malloc(sizeof(struct perf_trace_event_type));
@@ -145,8 +141,14 @@ struct perf_file_header {
struct perf_file_section attrs;
struct perf_file_section data;
struct perf_file_section event_types;
+ DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};
+void perf_header__feat_trace_info(struct perf_header *header)
+{
+ set_bit(HEADER_TRACE_INFO, header->adds_features);
+}
+
static void do_write(int fd, void *buf, size_t size)
{
while (size) {
@@ -160,6 +162,32 @@ static void do_write(int fd, void *buf, size_t size)
}
}
+static void perf_header__adds_write(struct perf_header *self, int fd)
+{
+ struct perf_file_section trace_sec;
+ u64 cur_offset = lseek(fd, 0, SEEK_CUR);
+ unsigned long *feat_mask = self->adds_features;
+
+ if (test_bit(HEADER_TRACE_INFO, feat_mask)) {
+ /* Write trace info */
+ trace_sec.offset = lseek(fd, sizeof(trace_sec), SEEK_CUR);
+ read_tracing_data(fd, attrs, nr_counters);
+ trace_sec.size = lseek(fd, 0, SEEK_CUR) - trace_sec.offset;
+
+ /* Write trace info headers */
+ lseek(fd, cur_offset, SEEK_SET);
+ do_write(fd, &trace_sec, sizeof(trace_sec));
+
+ /*
+ * Update cur_offset so that other (future) features can write
+ * their own information in this place. Even if we are the only
+ * feature, this at least seeks to the place the data should begin.
+ */
+ cur_offset = lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET);
+ }
+}
+
void perf_header__write(struct perf_header *self, int fd)
{
struct perf_file_header f_header;
@@ -198,6 +226,7 @@ void perf_header__write(struct perf_header *self, int fd)
if (events)
do_write(fd, events, self->event_size);
+ perf_header__adds_write(self, fd);
self->data_offset = lseek(fd, 0, SEEK_CUR);
@@ -219,6 +248,8 @@ void perf_header__write(struct perf_header *self, int fd)
},
};
+ memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features));
+
lseek(fd, 0, SEEK_SET);
do_write(fd, &f_header, sizeof(f_header));
lseek(fd, self->data_offset + self->data_size, SEEK_SET);
@@ -241,6 +272,20 @@ static void do_read(int fd, void *buf, size_t size)
}
}
+static void perf_header__adds_read(struct perf_header *self, int fd)
+{
+ const unsigned long *feat_mask = self->adds_features;
+
+ if (test_bit(HEADER_TRACE_INFO, feat_mask)) {
+ struct perf_file_section trace_sec;
+
+ do_read(fd, &trace_sec, sizeof(trace_sec));
+ lseek(fd, trace_sec.offset, SEEK_SET);
+ trace_report(fd);
+ lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET);
+ }
+}
+
struct perf_header *perf_header__read(int fd)
{
struct perf_header *self = perf_header__new();
@@ -254,10 +299,16 @@ struct perf_header *perf_header__read(int fd)
do_read(fd, &f_header, sizeof(f_header));
if (f_header.magic != PERF_MAGIC ||
- f_header.size != sizeof(f_header) ||
f_header.attr_size != sizeof(f_attr))
die("incompatible file format");
+ if (f_header.size != sizeof(f_header)) {
+ /* Support the previous format */
+ if (f_header.size == offsetof(typeof(f_header), adds_features))
+ bitmap_zero(f_header.adds_features, HEADER_FEAT_BITS);
+ else
+ die("incompatible file format");
+ }
nr_attrs = f_header.attrs.size / sizeof(f_attr);
lseek(fd, f_header.attrs.offset, SEEK_SET);
@@ -290,6 +341,11 @@ struct perf_header *perf_header__read(int fd)
do_read(fd, events, f_header.event_types.size);
event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
}
+
+ memcpy(&self->adds_features, &f_header.adds_features, sizeof(f_header.adds_features));
+
+ perf_header__adds_read(self, fd);
+
self->event_offset = f_header.event_types.offset;
self->event_size = f_header.event_types.size;
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index a0761bc7863c..2ea9dfb1236a 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -1,10 +1,12 @@
-#ifndef _PERF_HEADER_H
-#define _PERF_HEADER_H
+#ifndef __PERF_HEADER_H
+#define __PERF_HEADER_H
#include "../../../include/linux/perf_event.h"
#include <sys/types.h>
#include "types.h"
+#include <linux/bitmap.h>
+
struct perf_header_attr {
struct perf_event_attr attr;
int ids, size;
@@ -12,15 +14,20 @@ struct perf_header_attr {
off_t id_offset;
};
+#define HEADER_TRACE_INFO 1
+
+#define HEADER_FEAT_BITS 256
+
struct perf_header {
- int frozen;
- int attrs, size;
+ int frozen;
+ int attrs, size;
struct perf_header_attr **attr;
- s64 attr_offset;
- u64 data_offset;
- u64 data_size;
- u64 event_offset;
- u64 event_size;
+ s64 attr_offset;
+ u64 data_offset;
+ u64 data_size;
+ u64 event_offset;
+ u64 event_size;
+ DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};
struct perf_header *perf_header__read(int fd);
@@ -40,8 +47,8 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
u64 perf_header__sample_type(struct perf_header *header);
struct perf_event_attr *
perf_header__find_attr(u64 id, struct perf_header *header);
-
+void perf_header__feat_trace_info(struct perf_header *header);
struct perf_header *perf_header__new(void);
-#endif /* _PERF_HEADER_H */
+#endif /* __PERF_HEADER_H */
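
The adds_features bitmap reserves room for up to 256 optional header sections; HEADER_TRACE_INFO is the first user. A new feature would follow the same pattern: pick the next bit, set it before perf_header__write(), and test it in the __adds_write()/__adds_read() helpers. Purely hypothetical sketch (HEADER_BUILD_ID and the helper are invented here for illustration):

#include <linux/bitops.h>
#include "header.h"

#define HEADER_BUILD_ID 2	/* hypothetical next feature bit */

static void perf_header__feat_build_id(struct perf_header *header)
{
	set_bit(HEADER_BUILD_ID, header->adds_features);
}

/* read side (sketch):
 *	if (test_bit(HEADER_BUILD_ID, self->adds_features))
 *		parse that section;
 */
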
diff --git a/tools/perf/util/help.h b/tools/perf/util/help.h
index 7128783637b4..7f5c6dedd714 100644
--- a/tools/perf/util/help.h
+++ b/tools/perf/util/help.h
@@ -1,5 +1,5 @@
-#ifndef HELP_H
-#define HELP_H
+#ifndef __PERF_HELP_H
+#define __PERF_HELP_H
struct cmdnames {
size_t alloc;
@@ -26,4 +26,4 @@ int is_in_cmdlist(struct cmdnames *c, const char *s);
void list_commands(const char *title, struct cmdnames *main_cmds,
struct cmdnames *other_cmds);
-#endif /* HELP_H */
+#endif /* __PERF_HELP_H */
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
new file mode 100644
index 000000000000..7393a02fd8d4
--- /dev/null
+++ b/tools/perf/util/hist.c
@@ -0,0 +1,210 @@
+#include "hist.h"
+
+struct rb_root hist;
+struct rb_root collapse_hists;
+struct rb_root output_hists;
+int callchain;
+
+struct callchain_param callchain_param = {
+ .mode = CHAIN_GRAPH_REL,
+ .min_percent = 0.5
+};
+
+unsigned long total;
+unsigned long total_mmap;
+unsigned long total_comm;
+unsigned long total_fork;
+unsigned long total_unknown;
+unsigned long total_lost;
+
+/*
+ * histogram, sorted on item, collects counts
+ */
+
+struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map,
+ struct symbol *sym,
+ struct symbol *sym_parent,
+ u64 ip, u64 count, char level, bool *hit)
+{
+ struct rb_node **p = &hist.rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *he;
+ struct hist_entry entry = {
+ .thread = thread,
+ .map = map,
+ .sym = sym,
+ .ip = ip,
+ .level = level,
+ .count = count,
+ .parent = sym_parent,
+ };
+ int cmp;
+
+ while (*p != NULL) {
+ parent = *p;
+ he = rb_entry(parent, struct hist_entry, rb_node);
+
+ cmp = hist_entry__cmp(&entry, he);
+
+ if (!cmp) {
+ *hit = true;
+ return he;
+ }
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ he = malloc(sizeof(*he));
+ if (!he)
+ return NULL;
+ *he = entry;
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, &hist);
+ *hit = false;
+ return he;
+}
+
+int64_t
+hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct sort_entry *se;
+ int64_t cmp = 0;
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ cmp = se->cmp(left, right);
+ if (cmp)
+ break;
+ }
+
+ return cmp;
+}
+
+int64_t
+hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
+{
+ struct sort_entry *se;
+ int64_t cmp = 0;
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ int64_t (*f)(struct hist_entry *, struct hist_entry *);
+
+ f = se->collapse ?: se->cmp;
+
+ cmp = f(left, right);
+ if (cmp)
+ break;
+ }
+
+ return cmp;
+}
+
+void hist_entry__free(struct hist_entry *he)
+{
+ free(he);
+}
+
+/*
+ * collapse the histogram
+ */
+
+void collapse__insert_entry(struct hist_entry *he)
+{
+ struct rb_node **p = &collapse_hists.rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+ int64_t cmp;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node);
+
+ cmp = hist_entry__collapse(iter, he);
+
+ if (!cmp) {
+ iter->count += he->count;
+ hist_entry__free(he);
+ return;
+ }
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, &collapse_hists);
+}
+
+void collapse__resort(void)
+{
+ struct rb_node *next;
+ struct hist_entry *n;
+
+ if (!sort__need_collapse)
+ return;
+
+ next = rb_first(&hist);
+ while (next) {
+ n = rb_entry(next, struct hist_entry, rb_node);
+ next = rb_next(&n->rb_node);
+
+ rb_erase(&n->rb_node, &hist);
+ collapse__insert_entry(n);
+ }
+}
+
+/*
+ * reverse the map, sort on count.
+ */
+
+void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
+{
+ struct rb_node **p = &output_hists.rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+
+ if (callchain)
+ callchain_param.sort(&he->sorted_chain, &he->callchain,
+ min_callchain_hits, &callchain_param);
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node);
+
+ if (he->count > iter->count)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, &output_hists);
+}
+
+void output__resort(u64 total_samples)
+{
+ struct rb_node *next;
+ struct hist_entry *n;
+ struct rb_root *tree = &hist;
+ u64 min_callchain_hits;
+
+ min_callchain_hits =
+ total_samples * (callchain_param.min_percent / 100);
+
+ if (sort__need_collapse)
+ tree = &collapse_hists;
+
+ next = rb_first(tree);
+
+ while (next) {
+ n = rb_entry(next, struct hist_entry, rb_node);
+ next = rb_next(&n->rb_node);
+
+ rb_erase(&n->rb_node, tree);
+ output__insert_entry(n, min_callchain_hits);
+ }
+}
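
__hist_entry__add() returns the already-existing node and sets *hit when an equal entry is found, leaving it to the caller to account the extra period. A hedged sketch of the expected calling pattern (the resolved thread/map/sym objects are assumed):

#include "hist.h"

/* sketch of a caller; assumes thread/map/sym were already resolved */
static int account_sample(struct thread *thread, struct map *map,
			  struct symbol *sym, u64 ip, u64 count, char level)
{
	bool hit;
	struct hist_entry *he = __hist_entry__add(thread, map, sym, NULL,
						   ip, count, level, &hit);

	if (he == NULL)
		return -1;
	if (hit)
		he->count += count;	/* already in the tree, just bump it */
	return 0;
}
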
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
new file mode 100644
index 000000000000..ac2149c559b0
--- /dev/null
+++ b/tools/perf/util/hist.h
@@ -0,0 +1,50 @@
+#ifndef __PERF_HIST_H
+#define __PERF_HIST_H
+#include "../builtin.h"
+
+#include "util.h"
+
+#include "color.h"
+#include <linux/list.h>
+#include "cache.h"
+#include <linux/rbtree.h>
+#include "symbol.h"
+#include "string.h"
+#include "callchain.h"
+#include "strlist.h"
+#include "values.h"
+
+#include "../perf.h"
+#include "debug.h"
+#include "header.h"
+
+#include "parse-options.h"
+#include "parse-events.h"
+
+#include "thread.h"
+#include "sort.h"
+
+extern struct rb_root hist;
+extern struct rb_root collapse_hists;
+extern struct rb_root output_hists;
+extern int callchain;
+extern struct callchain_param callchain_param;
+extern unsigned long total;
+extern unsigned long total_mmap;
+extern unsigned long total_comm;
+extern unsigned long total_fork;
+extern unsigned long total_unknown;
+extern unsigned long total_lost;
+
+struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map,
+ struct symbol *sym, struct symbol *parent,
+ u64 ip, u64 count, char level, bool *hit);
+extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *);
+extern void hist_entry__free(struct hist_entry *);
+extern void collapse__insert_entry(struct hist_entry *);
+extern void collapse__resort(void);
+extern void output__insert_entry(struct hist_entry *, u64);
+extern void output__resort(u64);
+
+#endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/include/asm/asm-offsets.h b/tools/perf/util/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..ed538942523d
--- /dev/null
+++ b/tools/perf/util/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+/* stub */
diff --git a/tools/perf/util/include/asm/bitops.h b/tools/perf/util/include/asm/bitops.h
new file mode 100644
index 000000000000..58e9817ffae0
--- /dev/null
+++ b/tools/perf/util/include/asm/bitops.h
@@ -0,0 +1,18 @@
+#ifndef _PERF_ASM_BITOPS_H_
+#define _PERF_ASM_BITOPS_H_
+
+#include <sys/types.h>
+#include "../../types.h"
+#include <linux/compiler.h>
+
+/* CHECKME: Not sure both always match */
+#define BITS_PER_LONG __WORDSIZE
+
+#include "../../../../include/asm-generic/bitops/__fls.h"
+#include "../../../../include/asm-generic/bitops/fls.h"
+#include "../../../../include/asm-generic/bitops/fls64.h"
+#include "../../../../include/asm-generic/bitops/__ffs.h"
+#include "../../../../include/asm-generic/bitops/ffz.h"
+#include "../../../../include/asm-generic/bitops/hweight.h"
+
+#endif
diff --git a/tools/perf/util/include/asm/byteorder.h b/tools/perf/util/include/asm/byteorder.h
new file mode 100644
index 000000000000..b722abe3a626
--- /dev/null
+++ b/tools/perf/util/include/asm/byteorder.h
@@ -0,0 +1,2 @@
+#include <asm/types.h>
+#include "../../../../include/linux/swab.h"
diff --git a/tools/perf/util/include/asm/swab.h b/tools/perf/util/include/asm/swab.h
new file mode 100644
index 000000000000..ed538942523d
--- /dev/null
+++ b/tools/perf/util/include/asm/swab.h
@@ -0,0 +1 @@
+/* stub */
diff --git a/tools/perf/util/include/asm/uaccess.h b/tools/perf/util/include/asm/uaccess.h
new file mode 100644
index 000000000000..d0f72b8fcc35
--- /dev/null
+++ b/tools/perf/util/include/asm/uaccess.h
@@ -0,0 +1,14 @@
+#ifndef _PERF_ASM_UACCESS_H_
+#define _PERF_ASM_UACCESS_H_
+
+#define __get_user(src, dest) \
+({ \
+ (src) = *dest; \
+ 0; \
+})
+
+#define get_user __get_user
+
+#define access_ok(type, addr, size) 1
+
+#endif
diff --git a/tools/perf/util/include/linux/bitmap.h b/tools/perf/util/include/linux/bitmap.h
new file mode 100644
index 000000000000..821c1033bccf
--- /dev/null
+++ b/tools/perf/util/include/linux/bitmap.h
@@ -0,0 +1,2 @@
+#include "../../../../include/linux/bitmap.h"
+#include "../../../../include/asm-generic/bitops/find.h"
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
new file mode 100644
index 000000000000..ace57c36d1d0
--- /dev/null
+++ b/tools/perf/util/include/linux/bitops.h
@@ -0,0 +1,27 @@
+#ifndef _PERF_LINUX_BITOPS_H_
+#define _PERF_LINUX_BITOPS_H_
+
+#define __KERNEL__
+
+#define CONFIG_GENERIC_FIND_NEXT_BIT
+#define CONFIG_GENERIC_FIND_FIRST_BIT
+#include "../../../../include/linux/bitops.h"
+
+static inline void set_bit(int nr, unsigned long *addr)
+{
+ addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
+}
+
+static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
+{
+ return ((1UL << (nr % BITS_PER_LONG)) &
+ (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
+}
+
+unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned
+ long size, unsigned long offset);
+
+unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned
+ long size, unsigned long offset);
+
+#endif
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
new file mode 100644
index 000000000000..dfb0713ed47f
--- /dev/null
+++ b/tools/perf/util/include/linux/compiler.h
@@ -0,0 +1,10 @@
+#ifndef _PERF_LINUX_COMPILER_H_
+#define _PERF_LINUX_COMPILER_H_
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
+#define __user
+#define __attribute_const__
+
+#endif
diff --git a/tools/perf/util/include/linux/ctype.h b/tools/perf/util/include/linux/ctype.h
new file mode 100644
index 000000000000..bae5783282ef
--- /dev/null
+++ b/tools/perf/util/include/linux/ctype.h
@@ -0,0 +1 @@
+#include "../../../../include/linux/ctype.h"
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
index a6b87390cb52..21c0274c02fa 100644
--- a/tools/perf/util/include/linux/kernel.h
+++ b/tools/perf/util/include/linux/kernel.h
@@ -1,6 +1,16 @@
#ifndef PERF_LINUX_KERNEL_H_
#define PERF_LINUX_KERNEL_H_
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
+#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
+
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
@@ -26,4 +36,70 @@
_max1 > _max2 ? _max1 : _max2; })
#endif
+#ifndef min
+#define min(x, y) ({ \
+ typeof(x) _min1 = (x); \
+ typeof(y) _min2 = (y); \
+ (void) (&_min1 == &_min2); \
+ _min1 < _min2 ? _min1 : _min2; })
+#endif
+
+#ifndef BUG_ON
+#define BUG_ON(cond) assert(!(cond))
+#endif
+
+/*
+ * Both need more care to handle endianness
+ * (Don't use bitmap_copy_le() for now)
+ */
+#define cpu_to_le64(x) (x)
+#define cpu_to_le32(x) (x)
+
+static inline int
+vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+ int i;
+ ssize_t ssize = size;
+
+ i = vsnprintf(buf, size, fmt, args);
+
+ return (i >= ssize) ? (ssize - 1) : i;
+}
+
+static inline int scnprintf(char * buf, size_t size, const char * fmt, ...)
+{
+ va_list args;
+ ssize_t ssize = size;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ return (i >= ssize) ? (ssize - 1) : i;
+}
+
+static inline unsigned long
+simple_strtoul(const char *nptr, char **endptr, int base)
+{
+ return strtoul(nptr, endptr, base);
+}
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+#define pr_err(fmt, ...) \
+ do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+#define pr_warning(fmt, ...) \
+ do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+#define pr_info(fmt, ...) \
+ do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+#define pr_debug(fmt, ...) \
+ eprintf(1, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debugN(n, fmt, ...) \
+ eprintf(n, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
+
#endif
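
With eprintf() now taking a level (see util/debug.c above), the pr_debug* macros map verbosity onto the -v count: pr_debug needs verbose >= 1, pr_debug2 needs >= 2, while pr_info/pr_warning/pr_err always print to stderr. A tiny usage sketch, assuming the perf tool's include paths (report_dso() is an invented example function):

#include <linux/kernel.h>
#include "debug.h"

static void report_dso(const char *name, int nr_syms)
{
	if (nr_syms == 0)		/* always shown */
		pr_warning("No symbols found in %s\n", name);

	/* only shown when verbose >= 2 */
	pr_debug2("resolved %d symbols in %s\n", nr_syms, name);
}
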
diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h
new file mode 100644
index 000000000000..3b2f5900276f
--- /dev/null
+++ b/tools/perf/util/include/linux/string.h
@@ -0,0 +1 @@
+#include <string.h>
diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h
new file mode 100644
index 000000000000..196862a81a21
--- /dev/null
+++ b/tools/perf/util/include/linux/types.h
@@ -0,0 +1,9 @@
+#ifndef _PERF_LINUX_TYPES_H_
+#define _PERF_LINUX_TYPES_H_
+
+#include <asm/types.h>
+
+#define DECLARE_BITMAP(name,bits) \
+ unsigned long name[BITS_TO_LONGS(bits)]
+
+#endif
diff --git a/tools/perf/util/levenshtein.h b/tools/perf/util/levenshtein.h
index 0173abeef52c..b0fcb6d8a881 100644
--- a/tools/perf/util/levenshtein.h
+++ b/tools/perf/util/levenshtein.h
@@ -1,8 +1,8 @@
-#ifndef LEVENSHTEIN_H
-#define LEVENSHTEIN_H
+#ifndef __PERF_LEVENSHTEIN_H
+#define __PERF_LEVENSHTEIN_H
int levenshtein(const char *string1, const char *string2,
int swap_penalty, int substition_penalty,
int insertion_penalty, int deletion_penalty);
-#endif
+#endif /* __PERF_LEVENSHTEIN_H */
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 804e02382739..c1c556825343 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -3,6 +3,7 @@
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
+#include "debug.h"
static inline int is_anon_memory(const char *filename)
{
@@ -19,7 +20,8 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen)
return n;
}
- struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen)
+struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen,
+ unsigned int sym_priv_size, symbol_filter_t filter)
{
struct map *self = malloc(sizeof(*self));
@@ -27,6 +29,7 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen)
const char *filename = event->filename;
char newfilename[PATH_MAX];
int anon;
+ bool new_dso;
if (cwd) {
int n = strcommon(filename, cwd, cwdlen);
@@ -49,14 +52,29 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen)
self->end = event->start + event->len;
self->pgoff = event->pgoff;
- self->dso = dsos__findnew(filename);
+ self->dso = dsos__findnew(filename, sym_priv_size, &new_dso);
if (self->dso == NULL)
goto out_delete;
+ if (new_dso) {
+ int nr = dso__load(self->dso, self, filter);
+
+ if (nr < 0)
+ pr_warning("Failed to open %s, continuing "
+ "without symbols\n",
+ self->dso->long_name);
+ else if (nr == 0)
+ pr_warning("No symbols found in %s, maybe "
+ "install a debug package?\n",
+ self->dso->long_name);
+ }
+
if (self->dso == vdso || anon)
- self->map_ip = vdso__map_ip;
- else
+ self->map_ip = self->unmap_ip = identity__map_ip;
+ else {
self->map_ip = map__map_ip;
+ self->unmap_ip = map__unmap_ip;
+ }
}
return self;
out_delete:
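
map__map_ip()/map__unmap_ip() now form a symmetric pair (map-relative address vs. virtual address), with identity__map_ip used for the vdso and anonymous maps where the two views coincide. A self-contained round-trip check using the same formulas as the event.h hunk above (struct map is reduced to the relevant fields):

#include <assert.h>

typedef unsigned long long u64;

struct map { u64 start, end, pgoff; };

static u64 map__map_ip(struct map *map, u64 ip)   { return ip - map->start + map->pgoff; }
static u64 map__unmap_ip(struct map *map, u64 ip) { return ip + map->start - map->pgoff; }

int main(void)
{
	struct map m = { .start = 0x400000, .end = 0x401000, .pgoff = 0 };
	u64 ip = 0x400123;

	assert(map__unmap_ip(&m, map__map_ip(&m, ip)) == ip);
	return 0;
}
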
diff --git a/tools/perf/util/module.c b/tools/perf/util/module.c
deleted file mode 100644
index 0d8c85defcd2..000000000000
--- a/tools/perf/util/module.c
+++ /dev/null
@@ -1,545 +0,0 @@
-#include "util.h"
-#include "../perf.h"
-#include "string.h"
-#include "module.h"
-
-#include <libelf.h>
-#include <libgen.h>
-#include <gelf.h>
-#include <elf.h>
-#include <dirent.h>
-#include <sys/utsname.h>
-
-static unsigned int crc32(const char *p, unsigned int len)
-{
- int i;
- unsigned int crc = 0;
-
- while (len--) {
- crc ^= *p++;
- for (i = 0; i < 8; i++)
- crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
- }
- return crc;
-}
-
-/* module section methods */
-
-struct sec_dso *sec_dso__new_dso(const char *name)
-{
- struct sec_dso *self = malloc(sizeof(*self) + strlen(name) + 1);
-
- if (self != NULL) {
- strcpy(self->name, name);
- self->secs = RB_ROOT;
- self->find_section = sec_dso__find_section;
- }
-
- return self;
-}
-
-static void sec_dso__delete_section(struct section *self)
-{
- free(((void *)self));
-}
-
-void sec_dso__delete_sections(struct sec_dso *self)
-{
- struct section *pos;
- struct rb_node *next = rb_first(&self->secs);
-
- while (next) {
- pos = rb_entry(next, struct section, rb_node);
- next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, &self->secs);
- sec_dso__delete_section(pos);
- }
-}
-
-void sec_dso__delete_self(struct sec_dso *self)
-{
- sec_dso__delete_sections(self);
- free(self);
-}
-
-static void sec_dso__insert_section(struct sec_dso *self, struct section *sec)
-{
- struct rb_node **p = &self->secs.rb_node;
- struct rb_node *parent = NULL;
- const u64 hash = sec->hash;
- struct section *s;
-
- while (*p != NULL) {
- parent = *p;
- s = rb_entry(parent, struct section, rb_node);
- if (hash < s->hash)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
- rb_link_node(&sec->rb_node, parent, p);
- rb_insert_color(&sec->rb_node, &self->secs);
-}
-
-struct section *sec_dso__find_section(struct sec_dso *self, const char *name)
-{
- struct rb_node *n;
- u64 hash;
- int len;
-
- if (self == NULL)
- return NULL;
-
- len = strlen(name);
- hash = crc32(name, len);
-
- n = self->secs.rb_node;
-
- while (n) {
- struct section *s = rb_entry(n, struct section, rb_node);
-
- if (hash < s->hash)
- n = n->rb_left;
- else if (hash > s->hash)
- n = n->rb_right;
- else {
- if (!strcmp(name, s->name))
- return s;
- else
- n = rb_next(&s->rb_node);
- }
- }
-
- return NULL;
-}
-
-static size_t sec_dso__fprintf_section(struct section *self, FILE *fp)
-{
- return fprintf(fp, "name:%s vma:%llx path:%s\n",
- self->name, self->vma, self->path);
-}
-
-size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp)
-{
- size_t ret = fprintf(fp, "dso: %s\n", self->name);
-
- struct rb_node *nd;
- for (nd = rb_first(&self->secs); nd; nd = rb_next(nd)) {
- struct section *pos = rb_entry(nd, struct section, rb_node);
- ret += sec_dso__fprintf_section(pos, fp);
- }
-
- return ret;
-}
-
-static struct section *section__new(const char *name, const char *path)
-{
- struct section *self = calloc(1, sizeof(*self));
-
- if (!self)
- goto out_failure;
-
- self->name = calloc(1, strlen(name) + 1);
- if (!self->name)
- goto out_failure;
-
- self->path = calloc(1, strlen(path) + 1);
- if (!self->path)
- goto out_failure;
-
- strcpy(self->name, name);
- strcpy(self->path, path);
- self->hash = crc32(self->name, strlen(name));
-
- return self;
-
-out_failure:
- if (self) {
- if (self->name)
- free(self->name);
- if (self->path)
- free(self->path);
- free(self);
- }
-
- return NULL;
-}
-
-/* module methods */
-
-struct mod_dso *mod_dso__new_dso(const char *name)
-{
- struct mod_dso *self = malloc(sizeof(*self) + strlen(name) + 1);
-
- if (self != NULL) {
- strcpy(self->name, name);
- self->mods = RB_ROOT;
- self->find_module = mod_dso__find_module;
- }
-
- return self;
-}
-
-static void mod_dso__delete_module(struct module *self)
-{
- free(((void *)self));
-}
-
-void mod_dso__delete_modules(struct mod_dso *self)
-{
- struct module *pos;
- struct rb_node *next = rb_first(&self->mods);
-
- while (next) {
- pos = rb_entry(next, struct module, rb_node);
- next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, &self->mods);
- mod_dso__delete_module(pos);
- }
-}
-
-void mod_dso__delete_self(struct mod_dso *self)
-{
- mod_dso__delete_modules(self);
- free(self);
-}
-
-static void mod_dso__insert_module(struct mod_dso *self, struct module *mod)
-{
- struct rb_node **p = &self->mods.rb_node;
- struct rb_node *parent = NULL;
- const u64 hash = mod->hash;
- struct module *m;
-
- while (*p != NULL) {
- parent = *p;
- m = rb_entry(parent, struct module, rb_node);
- if (hash < m->hash)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
- rb_link_node(&mod->rb_node, parent, p);
- rb_insert_color(&mod->rb_node, &self->mods);
-}
-
-struct module *mod_dso__find_module(struct mod_dso *self, const char *name)
-{
- struct rb_node *n;
- u64 hash;
- int len;
-
- if (self == NULL)
- return NULL;
-
- len = strlen(name);
- hash = crc32(name, len);
-
- n = self->mods.rb_node;
-
- while (n) {
- struct module *m = rb_entry(n, struct module, rb_node);
-
- if (hash < m->hash)
- n = n->rb_left;
- else if (hash > m->hash)
- n = n->rb_right;
- else {
- if (!strcmp(name, m->name))
- return m;
- else
- n = rb_next(&m->rb_node);
- }
- }
-
- return NULL;
-}
-
-static size_t mod_dso__fprintf_module(struct module *self, FILE *fp)
-{
- return fprintf(fp, "name:%s path:%s\n", self->name, self->path);
-}
-
-size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp)
-{
- struct rb_node *nd;
- size_t ret;
-
- ret = fprintf(fp, "dso: %s\n", self->name);
-
- for (nd = rb_first(&self->mods); nd; nd = rb_next(nd)) {
- struct module *pos = rb_entry(nd, struct module, rb_node);
-
- ret += mod_dso__fprintf_module(pos, fp);
- }
-
- return ret;
-}
-
-static struct module *module__new(const char *name, const char *path)
-{
- struct module *self = calloc(1, sizeof(*self));
-
- if (!self)
- goto out_failure;
-
- self->name = calloc(1, strlen(name) + 1);
- if (!self->name)
- goto out_failure;
-
- self->path = calloc(1, strlen(path) + 1);
- if (!self->path)
- goto out_failure;
-
- strcpy(self->name, name);
- strcpy(self->path, path);
- self->hash = crc32(self->name, strlen(name));
-
- return self;
-
-out_failure:
- if (self) {
- if (self->name)
- free(self->name);
- if (self->path)
- free(self->path);
- free(self);
- }
-
- return NULL;
-}
-
-static int mod_dso__load_sections(struct module *mod)
-{
- int count = 0, path_len;
- struct dirent *entry;
- char *line = NULL;
- char *dir_path;
- DIR *dir;
- size_t n;
-
- path_len = strlen("/sys/module/");
- path_len += strlen(mod->name);
- path_len += strlen("/sections/");
-
- dir_path = calloc(1, path_len + 1);
- if (dir_path == NULL)
- goto out_failure;
-
- strcat(dir_path, "/sys/module/");
- strcat(dir_path, mod->name);
- strcat(dir_path, "/sections/");
-
- dir = opendir(dir_path);
- if (dir == NULL)
- goto out_free;
-
- while ((entry = readdir(dir))) {
- struct section *section;
- char *path, *vma;
- int line_len;
- FILE *file;
-
- if (!strcmp(".", entry->d_name) || !strcmp("..", entry->d_name))
- continue;
-
- path = calloc(1, path_len + strlen(entry->d_name) + 1);
- if (path == NULL)
- break;
- strcat(path, dir_path);
- strcat(path, entry->d_name);
-
- file = fopen(path, "r");
- if (file == NULL) {
- free(path);
- break;
- }
-
- line_len = getline(&line, &n, file);
- if (line_len < 0) {
- free(path);
- fclose(file);
- break;
- }
-
- if (!line) {
- free(path);
- fclose(file);
- break;
- }
-
- line[--line_len] = '\0'; /* \n */
-
- vma = strstr(line, "0x");
- if (!vma) {
- free(path);
- fclose(file);
- break;
- }
- vma += 2;
-
- section = section__new(entry->d_name, path);
- if (!section) {
- fprintf(stderr, "load_sections: allocation error\n");
- free(path);
- fclose(file);
- break;
- }
-
- hex2u64(vma, &section->vma);
- sec_dso__insert_section(mod->sections, section);
-
- free(path);
- fclose(file);
- count++;
- }
-
- closedir(dir);
- free(line);
- free(dir_path);
-
- return count;
-
-out_free:
- free(dir_path);
-
-out_failure:
- return count;
-}
-
-static int mod_dso__load_module_paths(struct mod_dso *self)
-{
- struct utsname uts;
- int count = 0, len, err = -1;
- char *line = NULL;
- FILE *file;
- char *dpath, *dir;
- size_t n;
-
- if (uname(&uts) < 0)
- return err;
-
- len = strlen("/lib/modules/");
- len += strlen(uts.release);
- len += strlen("/modules.dep");
-
- dpath = calloc(1, len + 1);
- if (dpath == NULL)
- return err;
-
- strcat(dpath, "/lib/modules/");
- strcat(dpath, uts.release);
- strcat(dpath, "/modules.dep");
-
- file = fopen(dpath, "r");
- if (file == NULL)
- goto out_failure;
-
- dir = dirname(dpath);
- if (!dir)
- goto out_failure;
- strcat(dir, "/");
-
- while (!feof(file)) {
- struct module *module;
- char *name, *path, *tmp;
- FILE *modfile;
- int line_len;
-
- line_len = getline(&line, &n, file);
- if (line_len < 0)
- break;
-
- if (!line)
- break;
-
- line[--line_len] = '\0'; /* \n */
-
- path = strchr(line, ':');
- if (!path)
- break;
- *path = '\0';
-
- path = strdup(line);
- if (!path)
- break;
-
- if (!strstr(path, dir)) {
- if (strncmp(path, "kernel/", 7))
- break;
-
- free(path);
- path = calloc(1, strlen(dir) + strlen(line) + 1);
- if (!path)
- break;
- strcat(path, dir);
- strcat(path, line);
- }
-
- modfile = fopen(path, "r");
- if (modfile == NULL)
- break;
- fclose(modfile);
-
- name = strdup(path);
- if (!name)
- break;
-
- name = strtok(name, "/");
- tmp = name;
-
- while (tmp) {
- tmp = strtok(NULL, "/");
- if (tmp)
- name = tmp;
- }
-
- name = strsep(&name, ".");
- if (!name)
- break;
-
- /* Quirk: replace '-' with '_' in all modules */
- for (len = strlen(name); len; len--) {
- if (*(name+len) == '-')
- *(name+len) = '_';
- }
-
- module = module__new(name, path);
- if (!module)
- break;
- mod_dso__insert_module(self, module);
-
- module->sections = sec_dso__new_dso("sections");
- if (!module->sections)
- break;
-
- module->active = mod_dso__load_sections(module);
-
- if (module->active > 0)
- count++;
- }
-
- if (feof(file))
- err = count;
- else
- fprintf(stderr, "load_module_paths: modules.dep parsing failure!\n");
-
-out_failure:
- if (dpath)
- free(dpath);
- if (file)
- fclose(file);
- if (line)
- free(line);
-
- return err;
-}
-
-int mod_dso__load_modules(struct mod_dso *dso)
-{
- int err;
-
- err = mod_dso__load_module_paths(dso);
-
- return err;
-}
diff --git a/tools/perf/util/module.h b/tools/perf/util/module.h
deleted file mode 100644
index 8a592ef641ca..000000000000
--- a/tools/perf/util/module.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef _PERF_MODULE_
-#define _PERF_MODULE_ 1
-
-#include <linux/types.h>
-#include "../types.h"
-#include <linux/list.h>
-#include <linux/rbtree.h>
-
-struct section {
- struct rb_node rb_node;
- u64 hash;
- u64 vma;
- char *name;
- char *path;
-};
-
-struct sec_dso {
- struct list_head node;
- struct rb_root secs;
- struct section *(*find_section)(struct sec_dso *, const char *name);
- char name[0];
-};
-
-struct module {
- struct rb_node rb_node;
- u64 hash;
- char *name;
- char *path;
- struct sec_dso *sections;
- int active;
-};
-
-struct mod_dso {
- struct list_head node;
- struct rb_root mods;
- struct module *(*find_module)(struct mod_dso *, const char *name);
- char name[0];
-};
-
-struct sec_dso *sec_dso__new_dso(const char *name);
-void sec_dso__delete_sections(struct sec_dso *self);
-void sec_dso__delete_self(struct sec_dso *self);
-size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp);
-struct section *sec_dso__find_section(struct sec_dso *self, const char *name);
-
-struct mod_dso *mod_dso__new_dso(const char *name);
-void mod_dso__delete_modules(struct mod_dso *self);
-void mod_dso__delete_self(struct mod_dso *self);
-size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp);
-struct module *mod_dso__find_module(struct mod_dso *self, const char *name);
-int mod_dso__load_modules(struct mod_dso *dso);
-
-#endif /* _PERF_MODULE_ */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 8cfb48cbbea0..b097570e9623 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -8,9 +8,10 @@
#include "cache.h"
#include "header.h"
-int nr_counters;
+int nr_counters;
struct perf_event_attr attrs[MAX_COUNTERS];
+char *filters[MAX_COUNTERS];
struct event_symbol {
u8 type;
@@ -708,7 +709,6 @@ static void store_event_type(const char *orgname)
perf_header__push_event(id, orgname);
}
-
int parse_events(const struct option *opt __used, const char *str, int unset __used)
{
struct perf_event_attr attr;
@@ -745,6 +745,28 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
return 0;
}
+int parse_filter(const struct option *opt __used, const char *str,
+ int unset __used)
+{
+ int i = nr_counters - 1;
+ int len = strlen(str);
+
+ if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) {
+ fprintf(stderr,
+ "-F option should follow a -e tracepoint option\n");
+ return -1;
+ }
+
+ filters[i] = malloc(len + 1);
+ if (!filters[i]) {
+ fprintf(stderr, "not enough memory to hold filter string\n");
+ return -1;
+ }
+ strcpy(filters[i], str);
+
+ return 0;
+}
+
static const char * const event_type_descriptors[] = {
"",
"Hardware event",
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 30c608112845..b8c1f64bc935 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -1,5 +1,5 @@
-#ifndef _PARSE_EVENTS_H
-#define _PARSE_EVENTS_H
+#ifndef __PERF_PARSE_EVENTS_H
+#define __PERF_PARSE_EVENTS_H
/*
* Parse symbolic events/counts passed in as options:
*/
@@ -17,11 +17,13 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
extern int nr_counters;
extern struct perf_event_attr attrs[MAX_COUNTERS];
+extern char *filters[MAX_COUNTERS];
extern const char *event_name(int ctr);
extern const char *__event_name(int type, u64 config);
extern int parse_events(const struct option *opt, const char *str, int unset);
+extern int parse_filter(const struct option *opt, const char *str, int unset);
#define EVENTS_HELP_MAX (128*1024)
@@ -31,4 +33,4 @@ extern char debugfs_path[];
extern int valid_debugfs_mount(const char *debugfs);
-#endif /* _PARSE_EVENTS_H */
+#endif /* __PERF_PARSE_EVENTS_H */
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index 2ee248ff27e5..948805af43c2 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -1,5 +1,5 @@
-#ifndef PARSE_OPTIONS_H
-#define PARSE_OPTIONS_H
+#ifndef __PERF_PARSE_OPTIONS_H
+#define __PERF_PARSE_OPTIONS_H
enum parse_opt_type {
/* special types */
@@ -174,4 +174,4 @@ extern int parse_opt_verbosity_cb(const struct option *, const char *, int);
extern const char *parse_options_fix_filename(const char *prefix, const char *file);
-#endif
+#endif /* __PERF_PARSE_OPTIONS_H */
diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h
index a5454a1d1c13..b6a019733919 100644
--- a/tools/perf/util/quote.h
+++ b/tools/perf/util/quote.h
@@ -1,5 +1,5 @@
-#ifndef QUOTE_H
-#define QUOTE_H
+#ifndef __PERF_QUOTE_H
+#define __PERF_QUOTE_H
#include <stddef.h>
#include <stdio.h>
@@ -65,4 +65,4 @@ extern void perl_quote_print(FILE *stream, const char *src);
extern void python_quote_print(FILE *stream, const char *src);
extern void tcl_quote_print(FILE *stream, const char *src);
-#endif
+#endif /* __PERF_QUOTE_H */
diff --git a/tools/perf/util/run-command.h b/tools/perf/util/run-command.h
index cc1837deba88..d79028727ce2 100644
--- a/tools/perf/util/run-command.h
+++ b/tools/perf/util/run-command.h
@@ -1,5 +1,5 @@
-#ifndef RUN_COMMAND_H
-#define RUN_COMMAND_H
+#ifndef __PERF_RUN_COMMAND_H
+#define __PERF_RUN_COMMAND_H
enum {
ERR_RUN_COMMAND_FORK = 10000,
@@ -85,4 +85,4 @@ struct async {
int start_async(struct async *async);
int finish_async(struct async *async);
-#endif
+#endif /* __PERF_RUN_COMMAND_H */
diff --git a/tools/perf/util/sigchain.h b/tools/perf/util/sigchain.h
index 618083bce0c6..1a53c11265fd 100644
--- a/tools/perf/util/sigchain.h
+++ b/tools/perf/util/sigchain.h
@@ -1,5 +1,5 @@
-#ifndef SIGCHAIN_H
-#define SIGCHAIN_H
+#ifndef __PERF_SIGCHAIN_H
+#define __PERF_SIGCHAIN_H
typedef void (*sigchain_fun)(int);
@@ -8,4 +8,4 @@ int sigchain_pop(int sig);
void sigchain_push_common(sigchain_fun f);
-#endif /* SIGCHAIN_H */
+#endif /* __PERF_SIGCHAIN_H */
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
new file mode 100644
index 000000000000..b490354d1b23
--- /dev/null
+++ b/tools/perf/util/sort.c
@@ -0,0 +1,290 @@
+#include "sort.h"
+
+regex_t parent_regex;
+char default_parent_pattern[] = "^sys_|^do_page_fault";
+char *parent_pattern = default_parent_pattern;
+char default_sort_order[] = "comm,dso,symbol";
+char *sort_order = default_sort_order;
+int sort__need_collapse = 0;
+int sort__has_parent = 0;
+
+enum sort_type sort__first_dimension;
+
+unsigned int dsos__col_width;
+unsigned int comms__col_width;
+unsigned int threads__col_width;
+static unsigned int parent_symbol__col_width;
+char * field_sep;
+
+LIST_HEAD(hist_entry__sort_list);
+
+struct sort_entry sort_thread = {
+ .header = "Command: Pid",
+ .cmp = sort__thread_cmp,
+ .print = sort__thread_print,
+ .width = &threads__col_width,
+};
+
+struct sort_entry sort_comm = {
+ .header = "Command",
+ .cmp = sort__comm_cmp,
+ .collapse = sort__comm_collapse,
+ .print = sort__comm_print,
+ .width = &comms__col_width,
+};
+
+struct sort_entry sort_dso = {
+ .header = "Shared Object",
+ .cmp = sort__dso_cmp,
+ .print = sort__dso_print,
+ .width = &dsos__col_width,
+};
+
+struct sort_entry sort_sym = {
+ .header = "Symbol",
+ .cmp = sort__sym_cmp,
+ .print = sort__sym_print,
+};
+
+struct sort_entry sort_parent = {
+ .header = "Parent symbol",
+ .cmp = sort__parent_cmp,
+ .print = sort__parent_print,
+ .width = &parent_symbol__col_width,
+};
+
+struct sort_dimension {
+ const char *name;
+ struct sort_entry *entry;
+ int taken;
+};
+
+static struct sort_dimension sort_dimensions[] = {
+ { .name = "pid", .entry = &sort_thread, },
+ { .name = "comm", .entry = &sort_comm, },
+ { .name = "dso", .entry = &sort_dso, },
+ { .name = "symbol", .entry = &sort_sym, },
+ { .name = "parent", .entry = &sort_parent, },
+};
+
+int64_t cmp_null(void *l, void *r)
+{
+ if (!l && !r)
+ return 0;
+ else if (!l)
+ return -1;
+ else
+ return 1;
+}
+
+/* --sort pid */
+
+int64_t
+sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->thread->pid - left->thread->pid;
+}
+
+int repsep_fprintf(FILE *fp, const char *fmt, ...)
+{
+ int n;
+ va_list ap;
+
+ va_start(ap, fmt);
+ if (!field_sep)
+ n = vfprintf(fp, fmt, ap);
+ else {
+ char *bf = NULL;
+ n = vasprintf(&bf, fmt, ap);
+ if (n > 0) {
+ char *sep = bf;
+
+ while (1) {
+ sep = strchr(sep, *field_sep);
+ if (sep == NULL)
+ break;
+ *sep = '.';
+ }
+ }
+ fputs(bf, fp);
+ free(bf);
+ }
+ va_end(ap);
+ return n;
+}
+
+size_t
+sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
+{
+ return repsep_fprintf(fp, "%*s:%5d", width - 6,
+ self->thread->comm ?: "", self->thread->pid);
+}
+
+size_t
+sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
+{
+ return repsep_fprintf(fp, "%*s", width, self->thread->comm);
+}
+
+/* --sort dso */
+
+int64_t
+sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct dso *dso_l = left->map ? left->map->dso : NULL;
+ struct dso *dso_r = right->map ? right->map->dso : NULL;
+ const char *dso_name_l, *dso_name_r;
+
+ if (!dso_l || !dso_r)
+ return cmp_null(dso_l, dso_r);
+
+ if (verbose) {
+ dso_name_l = dso_l->long_name;
+ dso_name_r = dso_r->long_name;
+ } else {
+ dso_name_l = dso_l->short_name;
+ dso_name_r = dso_r->short_name;
+ }
+
+ return strcmp(dso_name_l, dso_name_r);
+}
+
+size_t
+sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
+{
+ if (self->map && self->map->dso) {
+ const char *dso_name = !verbose ? self->map->dso->short_name :
+ self->map->dso->long_name;
+ return repsep_fprintf(fp, "%-*s", width, dso_name);
+ }
+
+ return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
+}
+
+/* --sort symbol */
+
+int64_t
+sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ u64 ip_l, ip_r;
+
+ if (left->sym == right->sym)
+ return 0;
+
+ ip_l = left->sym ? left->sym->start : left->ip;
+ ip_r = right->sym ? right->sym->start : right->ip;
+
+ return (int64_t)(ip_r - ip_l);
+}
+
+
+size_t
+sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
+{
+ size_t ret = 0;
+
+ if (verbose) {
+ char o = self->map ? dso__symtab_origin(self->map->dso) : '!';
+ ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, o);
+ }
+
+ ret += repsep_fprintf(fp, "[%c] ", self->level);
+ if (self->sym)
+ ret += repsep_fprintf(fp, "%s", self->sym->name);
+ else
+ ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
+
+ return ret;
+}
+
+/* --sort comm */
+
+int64_t
+sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->thread->pid - left->thread->pid;
+}
+
+int64_t
+sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
+{
+ char *comm_l = left->thread->comm;
+ char *comm_r = right->thread->comm;
+
+ if (!comm_l || !comm_r)
+ return cmp_null(comm_l, comm_r);
+
+ return strcmp(comm_l, comm_r);
+}
+
+/* --sort parent */
+
+int64_t
+sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct symbol *sym_l = left->parent;
+ struct symbol *sym_r = right->parent;
+
+ if (!sym_l || !sym_r)
+ return cmp_null(sym_l, sym_r);
+
+ return strcmp(sym_l->name, sym_r->name);
+}
+
+size_t
+sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
+{
+ return repsep_fprintf(fp, "%-*s", width,
+ self->parent ? self->parent->name : "[other]");
+}
+
+int sort_dimension__add(const char *tok)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
+ struct sort_dimension *sd = &sort_dimensions[i];
+
+ if (sd->taken)
+ continue;
+
+ if (strncasecmp(tok, sd->name, strlen(tok)))
+ continue;
+
+ if (sd->entry->collapse)
+ sort__need_collapse = 1;
+
+ if (sd->entry == &sort_parent) {
+ int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
+ if (ret) {
+ char err[BUFSIZ];
+
+ regerror(ret, &parent_regex, err, sizeof(err));
+ fprintf(stderr, "Invalid regex: %s\n%s",
+ parent_pattern, err);
+ exit(-1);
+ }
+ sort__has_parent = 1;
+ }
+
+ if (list_empty(&hist_entry__sort_list)) {
+ if (!strcmp(sd->name, "pid"))
+ sort__first_dimension = SORT_PID;
+ else if (!strcmp(sd->name, "comm"))
+ sort__first_dimension = SORT_COMM;
+ else if (!strcmp(sd->name, "dso"))
+ sort__first_dimension = SORT_DSO;
+ else if (!strcmp(sd->name, "symbol"))
+ sort__first_dimension = SORT_SYM;
+ else if (!strcmp(sd->name, "parent"))
+ sort__first_dimension = SORT_PARENT;
+ }
+
+ list_add_tail(&sd->entry->list, &hist_entry__sort_list);
+ sd->taken = 1;
+
+ return 0;
+ }
+
+ return -ESRCH;
+}
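
sort_dimension__add() is meant to be fed one token at a time from the comma-separated sort_order string ("comm,dso,symbol" by default). The driver loop lives in the report/top setup code rather than in this file; the sketch below only shows the assumed tokenize-and-register pattern, with a stub in place of the real function:

#include <stdio.h>
#include <string.h>

/* stub standing in for sort_dimension__add() */
static int sort_dimension__add_stub(const char *tok)
{
	printf("registered sort key: %s\n", tok);
	return 0;
}

int main(void)
{
	char sort_order[] = "comm,dso,symbol";	/* default_sort_order */
	char *saveptr = NULL;
	char *tok;

	for (tok = strtok_r(sort_order, ",", &saveptr); tok != NULL;
	     tok = strtok_r(NULL, ",", &saveptr)) {
		if (sort_dimension__add_stub(tok) < 0) {
			fprintf(stderr, "unknown --sort key: %s\n", tok);
			return 1;
		}
	}
	return 0;
}
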
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
new file mode 100644
index 000000000000..333e664ff45f
--- /dev/null
+++ b/tools/perf/util/sort.h
@@ -0,0 +1,99 @@
+#ifndef __PERF_SORT_H
+#define __PERF_SORT_H
+#include "../builtin.h"
+
+#include "util.h"
+
+#include "color.h"
+#include <linux/list.h>
+#include "cache.h"
+#include <linux/rbtree.h>
+#include "symbol.h"
+#include "string.h"
+#include "callchain.h"
+#include "strlist.h"
+#include "values.h"
+
+#include "../perf.h"
+#include "debug.h"
+#include "header.h"
+
+#include "parse-options.h"
+#include "parse-events.h"
+
+#include "thread.h"
+#include "sort.h"
+
+extern regex_t parent_regex;
+extern char *sort_order;
+extern char default_parent_pattern[];
+extern char *parent_pattern;
+extern char default_sort_order[];
+extern int sort__need_collapse;
+extern int sort__has_parent;
+extern char *field_sep;
+extern struct sort_entry sort_comm;
+extern struct sort_entry sort_dso;
+extern struct sort_entry sort_sym;
+extern struct sort_entry sort_parent;
+extern unsigned int dsos__col_width;
+extern unsigned int comms__col_width;
+extern unsigned int threads__col_width;
+extern enum sort_type sort__first_dimension;
+
+struct hist_entry {
+ struct rb_node rb_node;
+ u64 count;
+ struct thread *thread;
+ struct map *map;
+ struct symbol *sym;
+ u64 ip;
+ char level;
+ struct symbol *parent;
+ struct callchain_node callchain;
+ struct rb_root sorted_chain;
+};
+
+enum sort_type {
+ SORT_PID,
+ SORT_COMM,
+ SORT_DSO,
+ SORT_SYM,
+ SORT_PARENT
+};
+
+/*
+ * configurable sorting bits
+ */
+
+struct sort_entry {
+ struct list_head list;
+
+ const char *header;
+
+ int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
+ int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
+ size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width);
+ unsigned int *width;
+ bool elide;
+};
+
+extern struct sort_entry sort_thread;
+extern struct list_head hist_entry__sort_list;
+
+extern int repsep_fprintf(FILE *fp, const char *fmt, ...);
+extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int);
+extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int);
+extern size_t sort__dso_print(FILE *, struct hist_entry *, unsigned int);
+extern size_t sort__sym_print(FILE *, struct hist_entry *, unsigned int __used);
+extern int64_t cmp_null(void *, void *);
+extern int64_t sort__thread_cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__comm_cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__comm_collapse(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__dso_cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__sym_cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *);
+extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int);
+extern int sort_dimension__add(const char *);
+
+#endif /* __PERF_SORT_H */
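
The sort_entry callbacks declared above form a small comparison pipeline: consumers are expected to walk hist_entry__sort_list and apply each entry's ->cmp until one returns non-zero. That walk lives in the histogram code, not in this header; the sketch below shows the assumed pattern with a plain function-pointer array standing in for the kernel list_head:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

struct toy_entry { int pid; const char *comm; };

typedef int64_t (*cmp_fn)(const struct toy_entry *, const struct toy_entry *);

static int64_t cmp_pid(const struct toy_entry *l, const struct toy_entry *r)
{
	return (int64_t)r->pid - (int64_t)l->pid;
}

static int64_t cmp_comm(const struct toy_entry *l, const struct toy_entry *r)
{
	return (int64_t)strcmp(l->comm, r->comm);
}

/* walk the "sort list": the first non-zero comparison decides the order */
static int64_t toy_hist_entry__cmp(const struct toy_entry *l, const struct toy_entry *r)
{
	static const cmp_fn sort_list[] = { cmp_comm, cmp_pid };
	int64_t ret = 0;

	for (size_t i = 0; i < sizeof(sort_list) / sizeof(sort_list[0]); i++) {
		ret = sort_list[i](l, r);
		if (ret)
			break;
	}
	return ret;
}

int main(void)
{
	struct toy_entry a = { 10, "perf" }, b = { 20, "perf" };

	printf("%lld\n", (long long)toy_hist_entry__cmp(&a, &b));
	return 0;
}
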
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
index d2aa86c014c1..a3d121d6c83e 100644
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -1,5 +1,5 @@
-#ifndef STRBUF_H
-#define STRBUF_H
+#ifndef __PERF_STRBUF_H
+#define __PERF_STRBUF_H
/*
* Strbuf's can be use in many ways: as a byte array, or to store arbitrary
@@ -134,4 +134,4 @@ extern int launch_editor(const char *path, struct strbuf *buffer, const char *co
extern int strbuf_branchname(struct strbuf *sb, const char *name);
extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name);
-#endif /* STRBUF_H */
+#endif /* __PERF_STRBUF_H */
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index c93eca9a7be3..04743d3e9039 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -1,3 +1,4 @@
+#include <string.h>
#include "string.h"
static int hex(char ch)
@@ -32,3 +33,13 @@ int hex2u64(const char *ptr, u64 *long_val)
return p - ptr;
}
+
+char *strxfrchar(char *s, char from, char to)
+{
+ char *p = s;
+
+ while ((p = strchr(p, from)) != NULL)
+ *p++ = to;
+
+ return s;
+}
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h
index bf39dfadfd24..2c84bf65ba0f 100644
--- a/tools/perf/util/string.h
+++ b/tools/perf/util/string.h
@@ -1,11 +1,12 @@
-#ifndef _PERF_STRING_H_
-#define _PERF_STRING_H_
+#ifndef __PERF_STRING_H_
+#define __PERF_STRING_H_
#include "types.h"
int hex2u64(const char *ptr, u64 *val);
+char *strxfrchar(char *s, char from, char to);
#define _STR(x) #x
#define STR(x) _STR(x)
-#endif
+#endif /* __PERF_STRING_H */
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index 921818e44a54..cb4659306d7b 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -1,5 +1,5 @@
-#ifndef STRLIST_H_
-#define STRLIST_H_
+#ifndef __PERF_STRLIST_H
+#define __PERF_STRLIST_H
#include <linux/rbtree.h>
#include <stdbool.h>
@@ -36,4 +36,4 @@ static inline unsigned int strlist__nr_entries(const struct strlist *self)
}
int strlist__parse_list(struct strlist *self, const char *s);
-#endif /* STRLIST_H_ */
+#endif /* __PERF_STRLIST_H */
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 856655d8b0b8..b3637db025a2 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -103,7 +103,7 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
fprintf(svgfile, " rect.process2 { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.sample { fill:rgb( 0, 0,255); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.blocked { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
- fprintf(svgfile, " rect.waiting { fill:rgb(214,214, 0); fill-opacity:0.3; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.waiting { fill:rgb(224,214, 0); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.WAITING { fill:rgb(255,214, 48); fill-opacity:0.6; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.cpu { fill:rgb(192,192,192); fill-opacity:0.2; stroke-width:0.5; stroke:rgb(128,128,128); } \n");
fprintf(svgfile, " rect.pstate { fill:rgb(128,128,128); fill-opacity:0.8; stroke-width:0; } \n");
diff --git a/tools/perf/util/svghelper.h b/tools/perf/util/svghelper.h
index cd93195aedb3..e0781989cc31 100644
--- a/tools/perf/util/svghelper.h
+++ b/tools/perf/util/svghelper.h
@@ -1,5 +1,5 @@
-#ifndef _INCLUDE_GUARD_SVG_HELPER_
-#define _INCLUDE_GUARD_SVG_HELPER_
+#ifndef __PERF_SVGHELPER_H
+#define __PERF_SVGHELPER_H
#include "types.h"
@@ -25,4 +25,4 @@ extern void svg_close(void);
extern int svg_page_width;
-#endif
+#endif /* __PERF_SVGHELPER_H */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 47ea0609a760..8f0208ce237a 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -2,14 +2,14 @@
#include "../perf.h"
#include "string.h"
#include "symbol.h"
+#include "thread.h"
#include "debug.h"
#include <libelf.h>
#include <gelf.h>
#include <elf.h>
-
-const char *sym_hist_filter;
+#include <sys/utsname.h>
enum dso_origin {
DSO__ORIG_KERNEL = 0,
@@ -18,12 +18,65 @@ enum dso_origin {
DSO__ORIG_UBUNTU,
DSO__ORIG_BUILDID,
DSO__ORIG_DSO,
+ DSO__ORIG_KMODULE,
DSO__ORIG_NOT_FOUND,
};
-static struct symbol *symbol__new(u64 start, u64 len,
- const char *name, unsigned int priv_size,
- u64 obj_start, int v)
+static void dsos__add(struct dso *dso);
+static struct dso *dsos__find(const char *name);
+static struct map *map__new2(u64 start, struct dso *dso);
+static void kernel_maps__insert(struct map *map);
+
+static struct rb_root kernel_maps;
+
+static void dso__fixup_sym_end(struct dso *self)
+{
+ struct rb_node *nd, *prevnd = rb_first(&self->syms);
+ struct symbol *curr, *prev;
+
+ if (prevnd == NULL)
+ return;
+
+ curr = rb_entry(prevnd, struct symbol, rb_node);
+
+ for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
+ prev = curr;
+ curr = rb_entry(nd, struct symbol, rb_node);
+
+ if (prev->end == prev->start)
+ prev->end = curr->start - 1;
+ }
+
+ /* Last entry */
+ if (curr->end == curr->start)
+ curr->end = roundup(curr->start, 4096);
+}
+
+static void kernel_maps__fixup_end(void)
+{
+ struct map *prev, *curr;
+ struct rb_node *nd, *prevnd = rb_first(&kernel_maps);
+
+ if (prevnd == NULL)
+ return;
+
+ curr = rb_entry(prevnd, struct map, rb_node);
+
+ for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
+ prev = curr;
+ curr = rb_entry(nd, struct map, rb_node);
+ prev->end = curr->start - 1;
+ }
+
+ nd = rb_last(&curr->dso->syms);
+ if (nd) {
+ struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
+ curr->end = sym->end;
+ }
+}
+
+static struct symbol *symbol__new(u64 start, u64 len, const char *name,
+ unsigned int priv_size)
{
size_t namelen = strlen(name) + 1;
struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen);
@@ -31,23 +84,15 @@ static struct symbol *symbol__new(u64 start, u64 len,
if (!self)
return NULL;
- if (v >= 2)
- printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n",
- (u64)start, (unsigned long)len, name, self->hist, (void *)(unsigned long)obj_start);
-
- self->obj_start= obj_start;
- self->hist = NULL;
- self->hist_sum = 0;
-
- if (sym_hist_filter && !strcmp(name, sym_hist_filter))
- self->hist = calloc(sizeof(u64), len);
-
if (priv_size) {
memset(self, 0, priv_size);
self = ((void *)self) + priv_size;
}
self->start = start;
self->end = len ? start + len - 1 : start;
+
+ pr_debug3("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
+
memcpy(self->name, name, namelen);
return self;
@@ -60,12 +105,8 @@ static void symbol__delete(struct symbol *self, unsigned int priv_size)
static size_t symbol__fprintf(struct symbol *self, FILE *fp)
{
- if (!self->module)
- return fprintf(fp, " %llx-%llx %s\n",
+ return fprintf(fp, " %llx-%llx %s\n",
self->start, self->end, self->name);
- else
- return fprintf(fp, " %llx-%llx %s \t[%s]\n",
- self->start, self->end, self->name, self->module->name);
}
struct dso *dso__new(const char *name, unsigned int sym_priv_size)
@@ -74,6 +115,8 @@ struct dso *dso__new(const char *name, unsigned int sym_priv_size)
if (self != NULL) {
strcpy(self->name, name);
+ self->long_name = self->name;
+ self->short_name = self->name;
self->syms = RB_ROOT;
self->sym_priv_size = sym_priv_size;
self->find_symbol = dso__find_symbol;
@@ -100,6 +143,8 @@ static void dso__delete_symbols(struct dso *self)
void dso__delete(struct dso *self)
{
dso__delete_symbols(self);
+ if (self->long_name != self->name)
+ free(self->long_name);
free(self);
}
@@ -147,7 +192,7 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip)
size_t dso__fprintf(struct dso *self, FILE *fp)
{
- size_t ret = fprintf(fp, "dso: %s\n", self->name);
+ size_t ret = fprintf(fp, "dso: %s\n", self->short_name);
struct rb_node *nd;
for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) {
@@ -158,13 +203,16 @@ size_t dso__fprintf(struct dso *self, FILE *fp)
return ret;
}
-static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v)
+/*
+ * Loads the function entries in /proc/kallsyms into kernel_map->dso,
+ * so that we can in the next step set the symbol ->end address and then
+ * call kernel_maps__split_kallsyms.
+ */
+static int kernel_maps__load_all_kallsyms(void)
{
- struct rb_node *nd, *prevnd;
char *line = NULL;
size_t n;
FILE *file = fopen("/proc/kallsyms", "r");
- int count = 0;
if (file == NULL)
goto out_failure;
@@ -174,6 +222,7 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v)
struct symbol *sym;
int line_len, len;
char symbol_type;
+ char *symbol_name;
line_len = getline(&line, &n, file);
if (line_len < 0)
@@ -196,44 +245,24 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v)
*/
if (symbol_type != 'T' && symbol_type != 'W')
continue;
+
+ symbol_name = line + len + 2;
/*
- * Well fix up the end later, when we have all sorted.
+ * Will fix up the end later, when we have all symbols sorted.
*/
- sym = symbol__new(start, 0xdead, line + len + 2,
- self->sym_priv_size, 0, v);
+ sym = symbol__new(start, 0, symbol_name,
+ kernel_map->dso->sym_priv_size);
if (sym == NULL)
goto out_delete_line;
- if (filter && filter(self, sym))
- symbol__delete(sym, self->sym_priv_size);
- else {
- dso__insert_symbol(self, sym);
- count++;
- }
- }
-
- /*
- * Now that we have all sorted out, just set the ->end of all
- * symbols
- */
- prevnd = rb_first(&self->syms);
-
- if (prevnd == NULL)
- goto out_delete_line;
-
- for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
- struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node),
- *curr = rb_entry(nd, struct symbol, rb_node);
-
- prev->end = curr->start - 1;
- prevnd = nd;
+ dso__insert_symbol(kernel_map->dso, sym);
}
free(line);
fclose(file);
- return count;
+ return 0;
out_delete_line:
free(line);
@@ -241,14 +270,124 @@ out_failure:
return -1;
}
-static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int v)
+/*
+ * Split the symbols into maps, making sure there are no overlaps, i.e. the
+ * kernel range is broken in several maps, named [kernel].N, as we don't have
+ * the original ELF section names vmlinux have.
+ */
+static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules)
+{
+ struct map *map = kernel_map;
+ struct symbol *pos;
+ int count = 0;
+ struct rb_node *next = rb_first(&kernel_map->dso->syms);
+ int kernel_range = 0;
+
+ while (next) {
+ char *module;
+
+ pos = rb_entry(next, struct symbol, rb_node);
+ next = rb_next(&pos->rb_node);
+
+ module = strchr(pos->name, '\t');
+ if (module) {
+ if (!use_modules)
+ goto delete_symbol;
+
+ *module++ = '\0';
+
+ if (strcmp(map->dso->name, module)) {
+ map = kernel_maps__find_by_dso_name(module);
+ if (!map) {
+ pr_err("/proc/{kallsyms,modules} "
+ "inconsistency!\n");
+ return -1;
+ }
+ }
+ /*
+ * So that we look just like we get from .ko files,
+ * i.e. not prelinked, relative to map->start.
+ */
+ pos->start = map->map_ip(map, pos->start);
+ pos->end = map->map_ip(map, pos->end);
+ } else if (map != kernel_map) {
+ char dso_name[PATH_MAX];
+ struct dso *dso;
+
+ snprintf(dso_name, sizeof(dso_name), "[kernel].%d",
+ kernel_range++);
+
+ dso = dso__new(dso_name,
+ kernel_map->dso->sym_priv_size);
+ if (dso == NULL)
+ return -1;
+
+ map = map__new2(pos->start, dso);
+ if (map == NULL) {
+ dso__delete(dso);
+ return -1;
+ }
+
+ map->map_ip = map->unmap_ip = identity__map_ip;
+ kernel_maps__insert(map);
+ ++kernel_range;
+ }
+
+ if (filter && filter(map, pos)) {
+delete_symbol:
+ rb_erase(&pos->rb_node, &kernel_map->dso->syms);
+ symbol__delete(pos, kernel_map->dso->sym_priv_size);
+ } else {
+ if (map != kernel_map) {
+ rb_erase(&pos->rb_node, &kernel_map->dso->syms);
+ dso__insert_symbol(map->dso, pos);
+ }
+ count++;
+ }
+ }
+
+ return count;
+}
+
+
+static int kernel_maps__load_kallsyms(symbol_filter_t filter, int use_modules)
+{
+ if (kernel_maps__load_all_kallsyms())
+ return -1;
+
+ dso__fixup_sym_end(kernel_map->dso);
+
+ return kernel_maps__split_kallsyms(filter, use_modules);
+}
+
+static size_t kernel_maps__fprintf(FILE *fp)
+{
+ size_t printed = fprintf(fp, "Kernel maps:\n");
+ struct rb_node *nd;
+
+ for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+
+ printed += fprintf(fp, "Map:");
+ printed += map__fprintf(pos, fp);
+ if (verbose > 1) {
+ printed += dso__fprintf(pos->dso, fp);
+ printed += fprintf(fp, "--\n");
+ }
+ }
+
+ return printed + fprintf(fp, "END kernel maps\n");
+}
+
+static int dso__load_perf_map(struct dso *self, struct map *map,
+ symbol_filter_t filter)
{
char *line = NULL;
size_t n;
FILE *file;
int nr_syms = 0;
- file = fopen(self->name, "r");
+ file = fopen(self->long_name, "r");
if (file == NULL)
goto out_failure;
@@ -279,12 +418,12 @@ static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int v)
continue;
sym = symbol__new(start, size, line + len,
- self->sym_priv_size, start, v);
+ self->sym_priv_size);
if (sym == NULL)
goto out_delete_line;
- if (filter && filter(self, sym))
+ if (filter && filter(map, sym))
symbol__delete(sym, self->sym_priv_size);
else {
dso__insert_symbol(self, sym);
@@ -393,7 +532,7 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
* And always look at the original dso, not at debuginfo packages, that
* have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
*/
-static int dso__synthesize_plt_symbols(struct dso *self, int v)
+static int dso__synthesize_plt_symbols(struct dso *self)
{
uint32_t nr_rel_entries, idx;
GElf_Sym sym;
@@ -409,7 +548,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v)
Elf *elf;
int nr = 0, symidx, fd, err = 0;
- fd = open(self->name, O_RDONLY);
+ fd = open(self->long_name, O_RDONLY);
if (fd < 0)
goto out;
@@ -477,7 +616,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v)
"%s@plt", elf_sym__name(&sym, symstrs));
f = symbol__new(plt_offset, shdr_plt.sh_entsize,
- sympltname, self->sym_priv_size, 0, v);
+ sympltname, self->sym_priv_size);
if (!f)
goto out_elf_end;
@@ -495,7 +634,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v)
"%s@plt", elf_sym__name(&sym, symstrs));
f = symbol__new(plt_offset, shdr_plt.sh_entsize,
- sympltname, self->sym_priv_size, 0, v);
+ sympltname, self->sym_priv_size);
if (!f)
goto out_elf_end;
@@ -513,14 +652,18 @@ out_close:
if (err == 0)
return nr;
out:
- fprintf(stderr, "%s: problems reading %s PLT info.\n",
- __func__, self->name);
+ pr_warning("%s: problems reading %s PLT info.\n",
+ __func__, self->long_name);
return 0;
}
-static int dso__load_sym(struct dso *self, int fd, const char *name,
- symbol_filter_t filter, int v, struct module *mod)
+static int dso__load_sym(struct dso *self, struct map *map, const char *name,
+ int fd, symbol_filter_t filter, int kernel,
+ int kmodule)
{
+ struct map *curr_map = map;
+ struct dso *curr_dso = self;
+ size_t dso_name_len = strlen(self->short_name);
Elf_Data *symstrs, *secstrs;
uint32_t nr_syms;
int err = -1;
@@ -531,19 +674,16 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
GElf_Sym sym;
Elf_Scn *sec, *sec_strndx;
Elf *elf;
- int nr = 0, kernel = !strcmp("[kernel]", self->name);
+ int nr = 0;
elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
if (elf == NULL) {
- if (v)
- fprintf(stderr, "%s: cannot read %s ELF file.\n",
- __func__, name);
+ pr_err("%s: cannot read %s ELF file.\n", __func__, name);
goto out_close;
}
if (gelf_getehdr(elf, &ehdr) == NULL) {
- if (v)
- fprintf(stderr, "%s: cannot get elf header.\n", __func__);
+ pr_err("%s: cannot get elf header.\n", __func__);
goto out_elf_end;
}
@@ -587,9 +727,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
struct symbol *f;
const char *elf_name;
- char *demangled;
- u64 obj_start;
- struct section *section = NULL;
+ char *demangled = NULL;
int is_label = elf_sym__is_label(&sym);
const char *section_name;
@@ -605,52 +743,85 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
if (is_label && !elf_sec__is_text(&shdr, secstrs))
continue;
+ elf_name = elf_sym__name(&sym, symstrs);
section_name = elf_sec__name(&shdr, secstrs);
- obj_start = sym.st_value;
- if (self->adjust_symbols) {
- if (v >= 2)
- printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n",
- (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset);
+ if (kernel || kmodule) {
+ char dso_name[PATH_MAX];
- sym.st_value -= shdr.sh_addr - shdr.sh_offset;
- }
+ if (strcmp(section_name,
+ curr_dso->short_name + dso_name_len) == 0)
+ goto new_symbol;
- if (mod) {
- section = mod->sections->find_section(mod->sections, section_name);
- if (section)
- sym.st_value += section->vma;
- else {
- fprintf(stderr, "dso__load_sym() module %s lookup of %s failed\n",
- mod->name, section_name);
- goto out_elf_end;
+ if (strcmp(section_name, ".text") == 0) {
+ curr_map = map;
+ curr_dso = self;
+ goto new_symbol;
}
+
+ snprintf(dso_name, sizeof(dso_name),
+ "%s%s", self->short_name, section_name);
+
+ curr_map = kernel_maps__find_by_dso_name(dso_name);
+ if (curr_map == NULL) {
+ u64 start = sym.st_value;
+
+ if (kmodule)
+ start += map->start + shdr.sh_offset;
+
+ curr_dso = dso__new(dso_name, self->sym_priv_size);
+ if (curr_dso == NULL)
+ goto out_elf_end;
+ curr_map = map__new2(start, curr_dso);
+ if (curr_map == NULL) {
+ dso__delete(curr_dso);
+ goto out_elf_end;
+ }
+ curr_map->map_ip = identity__map_ip;
+ curr_map->unmap_ip = identity__map_ip;
+ curr_dso->origin = DSO__ORIG_KERNEL;
+ kernel_maps__insert(curr_map);
+ dsos__add(curr_dso);
+ } else
+ curr_dso = curr_map->dso;
+
+ goto new_symbol;
+ }
+
+ if (curr_dso->adjust_symbols) {
+ pr_debug2("adjusting symbol: st_value: %Lx sh_addr: "
+ "%Lx sh_offset: %Lx\n", (u64)sym.st_value,
+ (u64)shdr.sh_addr, (u64)shdr.sh_offset);
+ sym.st_value -= shdr.sh_addr - shdr.sh_offset;
}
/*
* We need to figure out if the object was created from C++ sources
* DWARF DW_compile_unit has this, but we don't always have access
* to it...
*/
- elf_name = elf_sym__name(&sym, symstrs);
demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI);
if (demangled != NULL)
elf_name = demangled;
-
+new_symbol:
f = symbol__new(sym.st_value, sym.st_size, elf_name,
- self->sym_priv_size, obj_start, v);
+ curr_dso->sym_priv_size);
free(demangled);
if (!f)
goto out_elf_end;
- if (filter && filter(self, f))
- symbol__delete(f, self->sym_priv_size);
+ if (filter && filter(curr_map, f))
+ symbol__delete(f, curr_dso->sym_priv_size);
else {
- f->module = mod;
- dso__insert_symbol(self, f);
+ dso__insert_symbol(curr_dso, f);
nr++;
}
}
+ /*
+ * For misannotated, zeroed, ASM function sizes.
+ */
+ if (nr > 0)
+ dso__fixup_sym_end(self);
err = nr;
out_elf_end:
elf_end(elf);
@@ -660,7 +831,7 @@ out_close:
#define BUILD_ID_SIZE 128
-static char *dso__read_build_id(struct dso *self, int v)
+static char *dso__read_build_id(struct dso *self)
{
int i;
GElf_Ehdr ehdr;
@@ -670,22 +841,20 @@ static char *dso__read_build_id(struct dso *self, int v)
char *build_id = NULL, *bid;
unsigned char *raw;
Elf *elf;
- int fd = open(self->name, O_RDONLY);
+ int fd = open(self->long_name, O_RDONLY);
if (fd < 0)
goto out;
elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
if (elf == NULL) {
- if (v)
- fprintf(stderr, "%s: cannot read %s ELF file.\n",
- __func__, self->name);
+ pr_err("%s: cannot read %s ELF file.\n", __func__,
+ self->long_name);
goto out_close;
}
if (gelf_getehdr(elf, &ehdr) == NULL) {
- if (v)
- fprintf(stderr, "%s: cannot get elf header.\n", __func__);
+ pr_err("%s: cannot get elf header.\n", __func__);
goto out_elf_end;
}
@@ -707,8 +876,7 @@ static char *dso__read_build_id(struct dso *self, int v)
++raw;
bid += 2;
}
- if (v >= 2)
- printf("%s(%s): %s\n", __func__, self->name, build_id);
+ pr_debug2("%s(%s): %s\n", __func__, self->long_name, build_id);
out_elf_end:
elf_end(elf);
out_close:
@@ -726,6 +894,7 @@ char dso__symtab_origin(const struct dso *self)
[DSO__ORIG_UBUNTU] = 'u',
[DSO__ORIG_BUILDID] = 'b',
[DSO__ORIG_DSO] = 'd',
+ [DSO__ORIG_KMODULE] = 'K',
};
if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND)
@@ -733,7 +902,7 @@ char dso__symtab_origin(const struct dso *self)
return origin[self->origin];
}
-int dso__load(struct dso *self, symbol_filter_t filter, int v)
+int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
{
int size = PATH_MAX;
char *name = malloc(size), *build_id = NULL;
@@ -746,7 +915,7 @@ int dso__load(struct dso *self, symbol_filter_t filter, int v)
self->adjust_symbols = 0;
if (strncmp(self->name, "/tmp/perf-", 10) == 0) {
- ret = dso__load_perf_map(self, filter, v);
+ ret = dso__load_perf_map(self, map, filter);
self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT :
DSO__ORIG_NOT_FOUND;
return ret;
@@ -759,13 +928,15 @@ more:
self->origin++;
switch (self->origin) {
case DSO__ORIG_FEDORA:
- snprintf(name, size, "/usr/lib/debug%s.debug", self->name);
+ snprintf(name, size, "/usr/lib/debug%s.debug",
+ self->long_name);
break;
case DSO__ORIG_UBUNTU:
- snprintf(name, size, "/usr/lib/debug%s", self->name);
+ snprintf(name, size, "/usr/lib/debug%s",
+ self->long_name);
break;
case DSO__ORIG_BUILDID:
- build_id = dso__read_build_id(self, v);
+ build_id = dso__read_build_id(self);
if (build_id != NULL) {
snprintf(name, size,
"/usr/lib/debug/.build-id/%.2s/%s.debug",
@@ -776,7 +947,7 @@ more:
self->origin++;
/* Fall thru */
case DSO__ORIG_DSO:
- snprintf(name, size, "%s", self->name);
+ snprintf(name, size, "%s", self->long_name);
break;
default:
@@ -786,7 +957,7 @@ more:
fd = open(name, O_RDONLY);
} while (fd < 0);
- ret = dso__load_sym(self, fd, name, filter, v, NULL);
+ ret = dso__load_sym(self, map, name, fd, filter, 0, 0);
close(fd);
/*
@@ -796,7 +967,7 @@ more:
goto more;
if (ret > 0) {
- int nr_plt = dso__synthesize_plt_symbols(self, v);
+ int nr_plt = dso__synthesize_plt_symbols(self);
if (nr_plt > 0)
ret += nr_plt;
}
@@ -807,137 +978,321 @@ out:
return ret;
}
-static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *name,
- symbol_filter_t filter, int v)
+struct map *kernel_map;
+
+static void kernel_maps__insert(struct map *map)
{
- struct module *mod = mod_dso__find_module(mods, name);
- int err = 0, fd;
+ maps__insert(&kernel_maps, map);
+}
- if (mod == NULL || !mod->active)
- return err;
+struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp)
+{
+ struct map *map = maps__find(&kernel_maps, ip);
- fd = open(mod->path, O_RDONLY);
+ if (mapp)
+ *mapp = map;
- if (fd < 0)
+ if (map) {
+ ip = map->map_ip(map, ip);
+ return map->dso->find_symbol(map->dso, ip);
+ }
+
+ return NULL;
+}
+
+struct map *kernel_maps__find_by_dso_name(const char *name)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) {
+ struct map *map = rb_entry(nd, struct map, rb_node);
+
+ if (map->dso && strcmp(map->dso->name, name) == 0)
+ return map;
+ }
+
+ return NULL;
+}
+
+static int dso__load_module_sym(struct dso *self, struct map *map,
+ symbol_filter_t filter)
+{
+ int err = 0, fd = open(self->long_name, O_RDONLY);
+
+ if (fd < 0) {
+ pr_err("%s: cannot open %s\n", __func__, self->long_name);
return err;
+ }
- err = dso__load_sym(self, fd, name, filter, v, mod);
+ err = dso__load_sym(self, map, self->long_name, fd, filter, 0, 1);
close(fd);
return err;
}
-int dso__load_modules(struct dso *self, symbol_filter_t filter, int v)
+static int dsos__load_modules_sym_dir(char *dirname, symbol_filter_t filter)
{
- struct mod_dso *mods = mod_dso__new_dso("modules");
- struct module *pos;
- struct rb_node *next;
- int err, count = 0;
+ struct dirent *dent;
+ int nr_symbols = 0, err;
+ DIR *dir = opendir(dirname);
- err = mod_dso__load_modules(mods);
+ if (!dir) {
+ pr_err("%s: cannot open %s dir\n", __func__, dirname);
+ return -1;
+ }
- if (err <= 0)
- return err;
+ while ((dent = readdir(dir)) != NULL) {
+ char path[PATH_MAX];
+
+ if (dent->d_type == DT_DIR) {
+ if (!strcmp(dent->d_name, ".") ||
+ !strcmp(dent->d_name, ".."))
+ continue;
+
+ snprintf(path, sizeof(path), "%s/%s",
+ dirname, dent->d_name);
+ err = dsos__load_modules_sym_dir(path, filter);
+ if (err < 0)
+ goto failure;
+ } else {
+ char *dot = strrchr(dent->d_name, '.'),
+ dso_name[PATH_MAX];
+ struct map *map;
+ struct rb_node *last;
+
+ if (dot == NULL || strcmp(dot, ".ko"))
+ continue;
+ snprintf(dso_name, sizeof(dso_name), "[%.*s]",
+ (int)(dot - dent->d_name), dent->d_name);
+
+ strxfrchar(dso_name, '-', '_');
+ map = kernel_maps__find_by_dso_name(dso_name);
+ if (map == NULL)
+ continue;
+
+ snprintf(path, sizeof(path), "%s/%s",
+ dirname, dent->d_name);
+
+ map->dso->long_name = strdup(path);
+ if (map->dso->long_name == NULL)
+ goto failure;
+
+ err = dso__load_module_sym(map->dso, map, filter);
+ if (err < 0)
+ goto failure;
+ last = rb_last(&map->dso->syms);
+ if (last) {
+ struct symbol *sym;
+ /*
+ * We do this here as well, even having the
+ * symbol size found in the symtab because
+ * misannotated ASM symbols may have the size
+ * set to zero.
+ */
+ dso__fixup_sym_end(map->dso);
+
+ sym = rb_entry(last, struct symbol, rb_node);
+ map->end = map->start + sym->end;
+ }
+ }
+ nr_symbols += err;
+ }
- /*
- * Iterate over modules, and load active symbols.
- */
- next = rb_first(&mods->mods);
- while (next) {
- pos = rb_entry(next, struct module, rb_node);
- err = dso__load_module(self, mods, pos->name, filter, v);
+ return nr_symbols;
+failure:
+ closedir(dir);
+ return -1;
+}
- if (err < 0)
- break;
+static int dsos__load_modules_sym(symbol_filter_t filter)
+{
+ struct utsname uts;
+ char modules_path[PATH_MAX];
- next = rb_next(&pos->rb_node);
- count += err;
- }
+ if (uname(&uts) < 0)
+ return -1;
- if (err < 0) {
- mod_dso__delete_modules(mods);
- mod_dso__delete_self(mods);
- return err;
- }
+ snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel",
+ uts.release);
- return count;
+ return dsos__load_modules_sym_dir(modules_path, filter);
}
-static inline void dso__fill_symbol_holes(struct dso *self)
+/*
+ * Constructor variant for modules (where we know from /proc/modules where
+ * they are loaded) and for vmlinux, where only after we load all the
+ * symbols we'll know where it starts and ends.
+ */
+static struct map *map__new2(u64 start, struct dso *dso)
{
- struct symbol *prev = NULL;
- struct rb_node *nd;
+ struct map *self = malloc(sizeof(*self));
- for (nd = rb_last(&self->syms); nd; nd = rb_prev(nd)) {
- struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+ if (self != NULL) {
+ self->start = start;
+ /*
+ * Will be filled after we load all the symbols
+ */
+ self->end = 0;
- if (prev) {
- u64 hole = 0;
- int alias = pos->start == prev->start;
+ self->pgoff = 0;
+ self->dso = dso;
+ self->map_ip = map__map_ip;
+ self->unmap_ip = map__unmap_ip;
+ RB_CLEAR_NODE(&self->rb_node);
+ }
+ return self;
+}
- if (!alias)
- hole = prev->start - pos->end - 1;
+static int dsos__load_modules(unsigned int sym_priv_size)
+{
+ char *line = NULL;
+ size_t n;
+ FILE *file = fopen("/proc/modules", "r");
+ struct map *map;
- if (hole || alias) {
- if (alias)
- pos->end = prev->end;
- else if (hole)
- pos->end = prev->start - 1;
- }
+ if (file == NULL)
+ return -1;
+
+ while (!feof(file)) {
+ char name[PATH_MAX];
+ u64 start;
+ struct dso *dso;
+ char *sep;
+ int line_len;
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0)
+ break;
+
+ if (!line)
+ goto out_failure;
+
+ line[--line_len] = '\0'; /* \n */
+
+ sep = strrchr(line, 'x');
+ if (sep == NULL)
+ continue;
+
+ hex2u64(sep + 1, &start);
+
+ sep = strchr(line, ' ');
+ if (sep == NULL)
+ continue;
+
+ *sep = '\0';
+
+ snprintf(name, sizeof(name), "[%s]", line);
+ dso = dso__new(name, sym_priv_size);
+
+ if (dso == NULL)
+ goto out_delete_line;
+
+ map = map__new2(start, dso);
+ if (map == NULL) {
+ dso__delete(dso);
+ goto out_delete_line;
}
- prev = pos;
+
+ dso->origin = DSO__ORIG_KMODULE;
+ kernel_maps__insert(map);
+ dsos__add(dso);
}
+
+ free(line);
+ fclose(file);
+
+ return 0;
+
+out_delete_line:
+ free(line);
+out_failure:
+ return -1;
}
-static int dso__load_vmlinux(struct dso *self, const char *vmlinux,
- symbol_filter_t filter, int v)
+static int dso__load_vmlinux(struct dso *self, struct map *map,
+ const char *vmlinux, symbol_filter_t filter)
{
int err, fd = open(vmlinux, O_RDONLY);
if (fd < 0)
return -1;
- err = dso__load_sym(self, fd, vmlinux, filter, v, NULL);
-
- if (err > 0)
- dso__fill_symbol_holes(self);
+ err = dso__load_sym(self, map, self->long_name, fd, filter, 1, 0);
close(fd);
return err;
}
-int dso__load_kernel(struct dso *self, const char *vmlinux,
- symbol_filter_t filter, int v, int use_modules)
+int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size,
+ symbol_filter_t filter, int use_modules)
{
int err = -1;
+ struct dso *dso = dso__new(vmlinux, sym_priv_size);
+
+ if (dso == NULL)
+ return -1;
+
+ dso->short_name = "[kernel]";
+ kernel_map = map__new2(0, dso);
+ if (kernel_map == NULL)
+ goto out_delete_dso;
+
+ kernel_map->map_ip = kernel_map->unmap_ip = identity__map_ip;
+
+ if (use_modules && dsos__load_modules(sym_priv_size) < 0) {
+ pr_warning("Failed to load list of modules in use! "
+ "Continuing...\n");
+ use_modules = 0;
+ }
if (vmlinux) {
- err = dso__load_vmlinux(self, vmlinux, filter, v);
+ err = dso__load_vmlinux(dso, kernel_map, vmlinux, filter);
if (err > 0 && use_modules) {
- int syms = dso__load_modules(self, filter, v);
+ int syms = dsos__load_modules_sym(filter);
- if (syms < 0) {
- fprintf(stderr, "dso__load_modules failed!\n");
- return syms;
- }
- err += syms;
+ if (syms < 0)
+ pr_warning("Failed to read module symbols!"
+ " Continuing...\n");
+ else
+ err += syms;
}
}
if (err <= 0)
- err = dso__load_kallsyms(self, filter, v);
+ err = kernel_maps__load_kallsyms(filter, use_modules);
+
+ if (err > 0) {
+ struct rb_node *node = rb_first(&dso->syms);
+ struct symbol *sym = rb_entry(node, struct symbol, rb_node);
+
+ kernel_map->start = sym->start;
+ node = rb_last(&dso->syms);
+ sym = rb_entry(node, struct symbol, rb_node);
+ kernel_map->end = sym->end;
+
+ dso->origin = DSO__ORIG_KERNEL;
+ kernel_maps__insert(kernel_map);
+ /*
+ * Now that we have all sorted out, just set the ->end of all
+ * maps:
+ */
+ kernel_maps__fixup_end();
+ dsos__add(dso);
- if (err > 0)
- self->origin = DSO__ORIG_KERNEL;
+ if (verbose)
+ kernel_maps__fprintf(stderr);
+ }
return err;
+
+out_delete_dso:
+ dso__delete(dso);
+ return -1;
}
LIST_HEAD(dsos);
-struct dso *kernel_dso;
struct dso *vdso;
-struct dso *hypervisor_dso;
const char *vmlinux_name = "vmlinux";
int modules;
@@ -957,33 +1312,21 @@ static struct dso *dsos__find(const char *name)
return NULL;
}
-struct dso *dsos__findnew(const char *name)
+struct dso *dsos__findnew(const char *name, unsigned int sym_priv_size,
+ bool *is_new)
{
struct dso *dso = dsos__find(name);
- int nr;
- if (dso)
- return dso;
-
- dso = dso__new(name, 0);
- if (!dso)
- goto out_delete_dso;
-
- nr = dso__load(dso, NULL, verbose);
- if (nr < 0) {
- eprintf("Failed to open: %s\n", name);
- goto out_delete_dso;
- }
- if (!nr)
- eprintf("No symbols found in: %s, maybe install a debug package?\n", name);
-
- dsos__add(dso);
+ if (!dso) {
+ dso = dso__new(name, sym_priv_size);
+ if (dso) {
+ dsos__add(dso);
+ *is_new = true;
+ }
+ } else
+ *is_new = false;
return dso;
-
-out_delete_dso:
- dso__delete(dso);
- return NULL;
}
void dsos__fprintf(FILE *fp)
@@ -994,43 +1337,21 @@ void dsos__fprintf(FILE *fp)
dso__fprintf(pos, fp);
}
-static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
-{
- return dso__find_symbol(dso, ip);
-}
-
-int load_kernel(void)
+int load_kernel(unsigned int sym_priv_size, symbol_filter_t filter)
{
- int err;
-
- kernel_dso = dso__new("[kernel]", 0);
- if (!kernel_dso)
+ if (dsos__load_kernel(vmlinux_name, sym_priv_size, filter,
+ modules) <= 0)
return -1;
- err = dso__load_kernel(kernel_dso, vmlinux_name, NULL, verbose, modules);
- if (err <= 0) {
- dso__delete(kernel_dso);
- kernel_dso = NULL;
- } else
- dsos__add(kernel_dso);
-
vdso = dso__new("[vdso]", 0);
if (!vdso)
return -1;
- vdso->find_symbol = vdso__find_symbol;
-
dsos__add(vdso);
- hypervisor_dso = dso__new("[hypervisor]", 0);
- if (!hypervisor_dso)
- return -1;
- dsos__add(hypervisor_dso);
-
- return err;
+ return 0;
}
-
void symbol__init(void)
{
elf_version(EV_CURRENT);
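
kernel_maps__find_symbol() above resolves an address in two steps: find the map whose [start, end] range covers the sample ip, translate the ip into map-relative space with that map's map_ip() (here simply ip - start), then look the symbol up in the map's DSO. A self-contained sketch of the same idea, with flat arrays standing in for the rb-trees used by the real code:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct toy_sym { uint64_t start, end; const char *name; };
struct toy_map { uint64_t start, end; const struct toy_sym *syms; size_t nr_syms; };

static const struct toy_sym *toy_find_symbol(const struct toy_map *maps,
					     size_t nr_maps, uint64_t ip)
{
	for (size_t i = 0; i < nr_maps; i++) {
		if (ip < maps[i].start || ip > maps[i].end)
			continue;
		/* map-relative translation, as done by map->map_ip() */
		uint64_t rip = ip - maps[i].start;
		for (size_t j = 0; j < maps[i].nr_syms; j++)
			if (rip >= maps[i].syms[j].start && rip <= maps[i].syms[j].end)
				return &maps[i].syms[j];
	}
	return NULL;
}

int main(void)
{
	/* hypothetical addresses and symbol, for illustration only */
	static const struct toy_sym ksyms[] = { { 0x0, 0xff, "do_page_fault" } };
	static const struct toy_map maps[] = {
		{ 0xffffffff81000000ULL, 0xffffffff810000ffULL, ksyms, 1 },
	};
	const struct toy_sym *s = toy_find_symbol(maps, 1, 0xffffffff81000042ULL);

	printf("%s\n", s ? s->name : "<not found>");
	return 0;
}
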
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 6e8490716408..77b7b3e42417 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -1,11 +1,11 @@
-#ifndef _PERF_SYMBOL_
-#define _PERF_SYMBOL_ 1
+#ifndef __PERF_SYMBOL
+#define __PERF_SYMBOL 1
#include <linux/types.h>
+#include <stdbool.h>
#include "types.h"
#include <linux/list.h>
#include <linux/rbtree.h>
-#include "module.h"
#include "event.h"
#ifdef HAVE_CPLUS_DEMANGLE
@@ -36,11 +36,6 @@ struct symbol {
struct rb_node rb_node;
u64 start;
u64 end;
- u64 obj_start;
- u64 hist_sum;
- u64 *hist;
- struct module *module;
- void *priv;
char name[0];
};
@@ -52,13 +47,11 @@ struct dso {
unsigned char adjust_symbols;
unsigned char slen_calculated;
unsigned char origin;
+ const char *short_name;
+ char *long_name;
char name[0];
};
-extern const char *sym_hist_filter;
-
-typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym);
-
struct dso *dso__new(const char *name, unsigned int sym_priv_size);
void dso__delete(struct dso *self);
@@ -69,24 +62,23 @@ static inline void *dso__sym_priv(struct dso *self, struct symbol *sym)
struct symbol *dso__find_symbol(struct dso *self, u64 ip);
-int dso__load_kernel(struct dso *self, const char *vmlinux,
- symbol_filter_t filter, int verbose, int modules);
-int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose);
-int dso__load(struct dso *self, symbol_filter_t filter, int verbose);
-struct dso *dsos__findnew(const char *name);
+int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size,
+ symbol_filter_t filter, int modules);
+struct dso *dsos__findnew(const char *name, unsigned int sym_priv_size,
+ bool *is_new);
+int dso__load(struct dso *self, struct map *map, symbol_filter_t filter);
void dsos__fprintf(FILE *fp);
size_t dso__fprintf(struct dso *self, FILE *fp);
char dso__symtab_origin(const struct dso *self);
-int load_kernel(void);
+int load_kernel(unsigned int sym_priv_size, symbol_filter_t filter);
void symbol__init(void);
extern struct list_head dsos;
-extern struct dso *kernel_dso;
+extern struct map *kernel_map;
extern struct dso *vdso;
-extern struct dso *hypervisor_dso;
extern const char *vmlinux_name;
extern int modules;
-#endif /* _PERF_SYMBOL_ */
+#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 45efb5db0d19..0f6d78c9863a 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -6,6 +6,9 @@
#include "util.h"
#include "debug.h"
+static struct rb_root threads;
+static struct thread *last_match;
+
static struct thread *thread__new(pid_t pid)
{
struct thread *self = calloc(1, sizeof(*self));
@@ -15,7 +18,8 @@ static struct thread *thread__new(pid_t pid)
self->comm = malloc(32);
if (self->comm)
snprintf(self->comm, 32, ":%d", self->pid);
- INIT_LIST_HEAD(&self->maps);
+ self->maps = RB_ROOT;
+ INIT_LIST_HEAD(&self->removed_maps);
}
return self;
@@ -29,21 +33,40 @@ int thread__set_comm(struct thread *self, const char *comm)
return self->comm ? 0 : -ENOMEM;
}
+int thread__comm_len(struct thread *self)
+{
+ if (!self->comm_len) {
+ if (!self->comm)
+ return 0;
+ self->comm_len = strlen(self->comm);
+ }
+
+ return self->comm_len;
+}
+
static size_t thread__fprintf(struct thread *self, FILE *fp)
{
+ struct rb_node *nd;
struct map *pos;
- size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
+ size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n",
+ self->pid, self->comm);
- list_for_each_entry(pos, &self->maps, node)
+ for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) {
+ pos = rb_entry(nd, struct map, rb_node);
+ ret += map__fprintf(pos, fp);
+ }
+
+	ret += fprintf(fp, "Removed maps:\n");
+
+ list_for_each_entry(pos, &self->removed_maps, node)
ret += map__fprintf(pos, fp);
return ret;
}
-struct thread *
-threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
+struct thread *threads__findnew(pid_t pid)
{
- struct rb_node **p = &threads->rb_node;
+ struct rb_node **p = &threads.rb_node;
struct rb_node *parent = NULL;
struct thread *th;
@@ -52,15 +75,15 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
* so most of the time we don't have to look up
* the full rbtree:
*/
- if (*last_match && (*last_match)->pid == pid)
- return *last_match;
+ if (last_match && last_match->pid == pid)
+ return last_match;
while (*p != NULL) {
parent = *p;
th = rb_entry(parent, struct thread, rb_node);
if (th->pid == pid) {
- *last_match = th;
+ last_match = th;
return th;
}
@@ -73,17 +96,16 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
th = thread__new(pid);
if (th != NULL) {
rb_link_node(&th->rb_node, parent, p);
- rb_insert_color(&th->rb_node, threads);
- *last_match = th;
+ rb_insert_color(&th->rb_node, &threads);
+ last_match = th;
}
return th;
}
-struct thread *
-register_idle_thread(struct rb_root *threads, struct thread **last_match)
+struct thread *register_idle_thread(void)
{
- struct thread *thread = threads__findnew(0, threads, last_match);
+ struct thread *thread = threads__findnew(0);
if (!thread || thread__set_comm(thread, "swapper")) {
fprintf(stderr, "problem inserting idle task.\n");
@@ -93,42 +115,82 @@ register_idle_thread(struct rb_root *threads, struct thread **last_match)
return thread;
}
-void thread__insert_map(struct thread *self, struct map *map)
+static void thread__remove_overlappings(struct thread *self, struct map *map)
{
- struct map *pos, *tmp;
-
- list_for_each_entry_safe(pos, tmp, &self->maps, node) {
- if (map__overlap(pos, map)) {
- if (verbose >= 2) {
- printf("overlapping maps:\n");
- map__fprintf(map, stdout);
- map__fprintf(pos, stdout);
- }
-
- if (map->start <= pos->start && map->end > pos->start)
- pos->start = map->end;
-
- if (map->end >= pos->end && map->start < pos->end)
- pos->end = map->start;
-
- if (verbose >= 2) {
- printf("after collision:\n");
- map__fprintf(pos, stdout);
- }
-
- if (pos->start >= pos->end) {
- list_del_init(&pos->node);
- free(pos);
- }
+ struct rb_node *next = rb_first(&self->maps);
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node);
+ next = rb_next(&pos->rb_node);
+
+ if (!map__overlap(pos, map))
+ continue;
+
+ if (verbose >= 2) {
+ fputs("overlapping maps:\n", stderr);
+ map__fprintf(map, stderr);
+ map__fprintf(pos, stderr);
}
+
+ rb_erase(&pos->rb_node, &self->maps);
+ /*
+ * We may have references to this map, for instance in some
+ * hist_entry instances, so just move them to a separate
+ * list.
+ */
+ list_add_tail(&pos->node, &self->removed_maps);
+ }
+}
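
map__overlap() is defined elsewhere in tools/perf; the sketch below assumes the usual interval test (each range starts before the other ends). Overlapped maps are then parked on removed_maps rather than freed, since hist_entry instances may still point at them:

    #include <stdbool.h>
    #include <stdio.h>

    struct range { unsigned long long start, end; };

    /* Two address ranges overlap iff each one starts before the other ends.
     * (map__overlap() in tools/perf/util/map.c is assumed to implement the
     * same test; the exact open/closed convention may differ.) */
    static bool ranges_overlap(const struct range *a, const struct range *b)
    {
            return a->start < b->end && b->start < a->end;
    }

    int main(void)
    {
            struct range old      = { 0x1000, 0x2000 };
            struct range incoming = { 0x1800, 0x3000 };

            printf("overlap: %d\n", ranges_overlap(&old, &incoming));
            return 0;
    }
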
+
+void maps__insert(struct rb_root *maps, struct map *map)
+{
+ struct rb_node **p = &maps->rb_node;
+ struct rb_node *parent = NULL;
+ const u64 ip = map->start;
+ struct map *m;
+
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct map, rb_node);
+ if (ip < m->start)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
}
- list_add_tail(&map->node, &self->maps);
+ rb_link_node(&map->rb_node, parent, p);
+ rb_insert_color(&map->rb_node, maps);
+}
+
+struct map *maps__find(struct rb_root *maps, u64 ip)
+{
+ struct rb_node **p = &maps->rb_node;
+ struct rb_node *parent = NULL;
+ struct map *m;
+
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct map, rb_node);
+ if (ip < m->start)
+ p = &(*p)->rb_left;
+ else if (ip > m->end)
+ p = &(*p)->rb_right;
+ else
+ return m;
+ }
+
+ return NULL;
+}
+
+void thread__insert_map(struct thread *self, struct map *map)
+{
+ thread__remove_overlappings(self, map);
+ maps__insert(&self->maps, map);
}
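
maps__find() walks the tree keyed on ->start and declares a hit when start <= ip <= end. The same lookup can be sketched standalone as a binary search over an array sorted by start (the patch itself uses the kernel rbtree helpers):

    #include <stdio.h>

    struct map_stub { unsigned long long start, end; };

    /* Binary search over maps sorted by ->start, mirroring maps__find():
     * go left when ip is below the map, right when it is above ->end,
     * otherwise the map contains ip. */
    static struct map_stub *find_map(struct map_stub *maps, int nr,
                                     unsigned long long ip)
    {
            int lo = 0, hi = nr - 1;

            while (lo <= hi) {
                    int mid = lo + (hi - lo) / 2;

                    if (ip < maps[mid].start)
                            hi = mid - 1;
                    else if (ip > maps[mid].end)
                            lo = mid + 1;
                    else
                            return &maps[mid];
            }
            return NULL;
    }

    int main(void)
    {
            struct map_stub maps[] = {
                    { 0x1000, 0x1fff }, { 0x4000, 0x5fff }, { 0x8000, 0x8fff },
            };
            struct map_stub *m = find_map(maps, 3, 0x4100);

            printf("%s\n", m ? "found" : "not found");
            return 0;
    }
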
int thread__fork(struct thread *self, struct thread *parent)
{
- struct map *map;
+ struct rb_node *nd;
if (self->comm)
free(self->comm);
@@ -136,7 +198,8 @@ int thread__fork(struct thread *self, struct thread *parent)
if (!self->comm)
return -ENOMEM;
- list_for_each_entry(map, &parent->maps, node) {
+ for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) {
+ struct map *map = rb_entry(nd, struct map, rb_node);
struct map *new = map__clone(map);
if (!new)
return -ENOMEM;
@@ -146,26 +209,12 @@ int thread__fork(struct thread *self, struct thread *parent)
return 0;
}
-struct map *thread__find_map(struct thread *self, u64 ip)
-{
- struct map *pos;
-
- if (self == NULL)
- return NULL;
-
- list_for_each_entry(pos, &self->maps, node)
- if (ip >= pos->start && ip <= pos->end)
- return pos;
-
- return NULL;
-}
-
-size_t threads__fprintf(FILE *fp, struct rb_root *threads)
+size_t threads__fprintf(FILE *fp)
{
size_t ret = 0;
struct rb_node *nd;
- for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 32aea3c1c2ad..53addd77ce8f 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -1,22 +1,37 @@
+#ifndef __PERF_THREAD_H
+#define __PERF_THREAD_H
+
#include <linux/rbtree.h>
-#include <linux/list.h>
#include <unistd.h>
#include "symbol.h"
struct thread {
struct rb_node rb_node;
- struct list_head maps;
+ struct rb_root maps;
+ struct list_head removed_maps;
pid_t pid;
char shortname[3];
char *comm;
+ int comm_len;
};
int thread__set_comm(struct thread *self, const char *comm);
-struct thread *
-threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match);
-struct thread *
-register_idle_thread(struct rb_root *threads, struct thread **last_match);
+int thread__comm_len(struct thread *self);
+struct thread *threads__findnew(pid_t pid);
+struct thread *register_idle_thread(void);
void thread__insert_map(struct thread *self, struct map *map);
int thread__fork(struct thread *self, struct thread *parent);
-struct map *thread__find_map(struct thread *self, u64 ip);
-size_t threads__fprintf(FILE *fp, struct rb_root *threads);
+size_t threads__fprintf(FILE *fp);
+
+void maps__insert(struct rb_root *maps, struct map *map);
+struct map *maps__find(struct rb_root *maps, u64 ip);
+
+struct symbol *kernel_maps__find_symbol(const u64 ip, struct map **mapp);
+struct map *kernel_maps__find_by_dso_name(const char *name);
+
+static inline struct map *thread__find_map(struct thread *self, u64 ip)
+{
+ return self ? maps__find(&self->maps, ip) : NULL;
+}
+
+#endif /* __PERF_THREAD_H */
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index af4b0573b37f..831052d4b4fb 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -496,14 +496,12 @@ get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events)
return path.next;
}
-void read_tracing_data(struct perf_event_attr *pattrs, int nb_events)
+void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events)
{
char buf[BUFSIZ];
struct tracepoint_path *tps;
- output_fd = open(output_file, O_WRONLY | O_CREAT | O_TRUNC | O_LARGEFILE, 0644);
- if (output_fd < 0)
- die("creating file '%s'", output_file);
+ output_fd = fd;
buf[0] = 23;
buf[1] = 8;
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 55c9659a56e2..eae560503086 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -40,6 +40,8 @@ int header_page_size_size;
int header_page_data_offset;
int header_page_data_size;
+int latency_format;
+
static char *input_buf;
static unsigned long long input_buf_ptr;
static unsigned long long input_buf_siz;
@@ -284,18 +286,19 @@ void parse_ftrace_printk(char *file, unsigned int size __unused)
char *line;
char *next = NULL;
char *addr_str;
- int ret;
int i;
line = strtok_r(file, "\n", &next);
while (line) {
+ addr_str = strsep(&line, ":");
+ if (!line) {
+ warning("error parsing print strings");
+ break;
+ }
item = malloc_or_die(sizeof(*item));
- ret = sscanf(line, "%as : %as",
- (float *)(void *)&addr_str, /* workaround gcc warning */
- (float *)(void *)&item->printk);
item->addr = strtoull(addr_str, NULL, 16);
- free(addr_str);
-
+ /* fmt still has a space, skip it */
+ item->printk = strdup(line+1);
item->next = list;
list = item;
line = strtok_r(NULL, "\n", &next);
@@ -522,7 +525,10 @@ static enum event_type __read_token(char **tok)
last_ch = ch;
ch = __read_char();
buf[i++] = ch;
- } while (ch != quote_ch && last_ch != '\\');
+		/* two consecutive '\' characters cancel each other */
+ if (ch == '\\' && last_ch == '\\')
+ last_ch = 0;
+ } while (ch != quote_ch || last_ch == '\\');
/* remove the last quote */
i--;
goto out;
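
The tokenizer change above keeps scanning while inside the quotes, so an escaped quote stays part of the token, and a pair of backslashes cancels itself so a trailing '\' cannot hide the closing quote. A standalone sketch of that scan (hypothetical helper, not the perf code):

    #include <stdio.h>
    #include <string.h>

    /* Copy a quoted token out of 's' (which starts at the opening quote),
     * honouring \" and letting a backslash pair cancel itself, in the same
     * spirit as __read_token() above.  Returns the number of bytes
     * consumed, or -1 if the closing quote is missing. */
    static int read_quoted(const char *s, char *out, size_t outlen)
    {
            char quote = s[0], last = 0;
            size_t i = 1, o = 0;

            while (s[i]) {
                    char ch = s[i++];

                    if (ch == quote && last != '\\')
                            break;
                    if (o + 1 < outlen)
                            out[o++] = ch;
                    if (ch == '\\' && last == '\\')
                            last = 0;       /* "\\" cancels itself */
                    else
                            last = ch;
            }
            out[o] = '\0';
            return s[i - 1] == quote ? (int)i : -1;
    }

    int main(void)
    {
            char buf[64];

            read_quoted("\"a \\\"quoted\\\" word\\\\\"", buf, sizeof(buf));
            printf("%s\n", buf);
            return 0;
    }
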
@@ -610,7 +616,7 @@ static enum event_type read_token_item(char **tok)
static int test_type(enum event_type type, enum event_type expect)
{
if (type != expect) {
- die("Error: expected type %d but read %d",
+ warning("Error: expected type %d but read %d",
expect, type);
return -1;
}
@@ -621,13 +627,13 @@ static int test_type_token(enum event_type type, char *token,
enum event_type expect, const char *expect_tok)
{
if (type != expect) {
- die("Error: expected type %d but read %d",
+ warning("Error: expected type %d but read %d",
expect, type);
return -1;
}
if (strcmp(token, expect_tok) != 0) {
- die("Error: expected '%s' but read '%s'",
+ warning("Error: expected '%s' but read '%s'",
expect_tok, token);
return -1;
}
@@ -665,7 +671,7 @@ static int __read_expected(enum event_type expect, const char *str, int newline_
free_token(token);
- return 0;
+ return ret;
}
static int read_expected(enum event_type expect, const char *str)
@@ -682,10 +688,10 @@ static char *event_read_name(void)
{
char *token;
- if (read_expected(EVENT_ITEM, (char *)"name") < 0)
+ if (read_expected(EVENT_ITEM, "name") < 0)
return NULL;
- if (read_expected(EVENT_OP, (char *)":") < 0)
+ if (read_expected(EVENT_OP, ":") < 0)
return NULL;
if (read_expect_type(EVENT_ITEM, &token) < 0)
@@ -703,10 +709,10 @@ static int event_read_id(void)
char *token;
int id;
- if (read_expected_item(EVENT_ITEM, (char *)"ID") < 0)
+ if (read_expected_item(EVENT_ITEM, "ID") < 0)
return -1;
- if (read_expected(EVENT_OP, (char *)":") < 0)
+ if (read_expected(EVENT_OP, ":") < 0)
return -1;
if (read_expect_type(EVENT_ITEM, &token) < 0)
@@ -721,6 +727,24 @@ static int event_read_id(void)
return -1;
}
+static int field_is_string(struct format_field *field)
+{
+ if ((field->flags & FIELD_IS_ARRAY) &&
+ (!strstr(field->type, "char") || !strstr(field->type, "u8") ||
+ !strstr(field->type, "s8")))
+ return 1;
+
+ return 0;
+}
+
+static int field_is_dynamic(struct format_field *field)
+{
+ if (!strcmp(field->type, "__data_loc"))
+ return 1;
+
+ return 0;
+}
+
static int event_read_fields(struct event *event, struct format_field **fields)
{
struct format_field *field = NULL;
@@ -738,7 +762,7 @@ static int event_read_fields(struct event *event, struct format_field **fields)
count++;
- if (test_type_token(type, token, EVENT_ITEM, (char *)"field"))
+ if (test_type_token(type, token, EVENT_ITEM, "field"))
goto fail;
free_token(token);
@@ -753,7 +777,7 @@ static int event_read_fields(struct event *event, struct format_field **fields)
type = read_token(&token);
}
- if (test_type_token(type, token, EVENT_OP, (char *)":") < 0)
+ if (test_type_token(type, token, EVENT_OP, ":") < 0)
return -1;
if (read_expect_type(EVENT_ITEM, &token) < 0)
@@ -865,14 +889,20 @@ static int event_read_fields(struct event *event, struct format_field **fields)
free(brackets);
}
- if (test_type_token(type, token, EVENT_OP, (char *)";"))
+ if (field_is_string(field)) {
+ field->flags |= FIELD_IS_STRING;
+ if (field_is_dynamic(field))
+ field->flags |= FIELD_IS_DYNAMIC;
+ }
+
+ if (test_type_token(type, token, EVENT_OP, ";"))
goto fail;
free_token(token);
- if (read_expected(EVENT_ITEM, (char *)"offset") < 0)
+ if (read_expected(EVENT_ITEM, "offset") < 0)
goto fail_expect;
- if (read_expected(EVENT_OP, (char *)":") < 0)
+ if (read_expected(EVENT_OP, ":") < 0)
goto fail_expect;
if (read_expect_type(EVENT_ITEM, &token))
@@ -880,13 +910,13 @@ static int event_read_fields(struct event *event, struct format_field **fields)
field->offset = strtoul(token, NULL, 0);
free_token(token);
- if (read_expected(EVENT_OP, (char *)";") < 0)
+ if (read_expected(EVENT_OP, ";") < 0)
goto fail_expect;
- if (read_expected(EVENT_ITEM, (char *)"size") < 0)
+ if (read_expected(EVENT_ITEM, "size") < 0)
goto fail_expect;
- if (read_expected(EVENT_OP, (char *)":") < 0)
+ if (read_expected(EVENT_OP, ":") < 0)
goto fail_expect;
if (read_expect_type(EVENT_ITEM, &token))
@@ -894,11 +924,33 @@ static int event_read_fields(struct event *event, struct format_field **fields)
field->size = strtoul(token, NULL, 0);
free_token(token);
- if (read_expected(EVENT_OP, (char *)";") < 0)
+ if (read_expected(EVENT_OP, ";") < 0)
goto fail_expect;
- if (read_expect_type(EVENT_NEWLINE, &token) < 0)
- goto fail;
+ type = read_token(&token);
+ if (type != EVENT_NEWLINE) {
+ /* newer versions of the kernel have a "signed" type */
+ if (test_type_token(type, token, EVENT_ITEM, "signed"))
+ goto fail;
+
+ free_token(token);
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ goto fail_expect;
+
+ if (read_expect_type(EVENT_ITEM, &token))
+ goto fail;
+
+ /* add signed type */
+
+ free_token(token);
+ if (read_expected(EVENT_OP, ";") < 0)
+ goto fail_expect;
+
+ if (read_expect_type(EVENT_NEWLINE, &token))
+ goto fail;
+ }
+
free_token(token);
*fields = field;
@@ -921,10 +973,10 @@ static int event_read_format(struct event *event)
char *token;
int ret;
- if (read_expected_item(EVENT_ITEM, (char *)"format") < 0)
+ if (read_expected_item(EVENT_ITEM, "format") < 0)
return -1;
- if (read_expected(EVENT_OP, (char *)":") < 0)
+ if (read_expected(EVENT_OP, ":") < 0)
return -1;
if (read_expect_type(EVENT_NEWLINE, &token))
@@ -984,7 +1036,7 @@ process_cond(struct event *event, struct print_arg *top, char **tok)
*tok = NULL;
type = process_arg(event, left, &token);
- if (test_type_token(type, token, EVENT_OP, (char *)":"))
+ if (test_type_token(type, token, EVENT_OP, ":"))
goto out_free;
arg->op.op = token;
@@ -1004,6 +1056,35 @@ out_free:
return EVENT_ERROR;
}
+static enum event_type
+process_array(struct event *event, struct print_arg *top, char **tok)
+{
+ struct print_arg *arg;
+ enum event_type type;
+ char *token = NULL;
+
+ arg = malloc_or_die(sizeof(*arg));
+ memset(arg, 0, sizeof(*arg));
+
+ *tok = NULL;
+ type = process_arg(event, arg, &token);
+ if (test_type_token(type, token, EVENT_OP, "]"))
+ goto out_free;
+
+ top->op.right = arg;
+
+ free_token(token);
+ type = read_token_item(&token);
+ *tok = token;
+
+ return type;
+
+out_free:
+ free_token(*tok);
+ free_arg(arg);
+ return EVENT_ERROR;
+}
+
static int get_op_prio(char *op)
{
if (!op[1]) {
@@ -1128,6 +1209,8 @@ process_op(struct event *event, struct print_arg *arg, char **tok)
strcmp(token, "*") == 0 ||
strcmp(token, "^") == 0 ||
strcmp(token, "/") == 0 ||
+ strcmp(token, "<") == 0 ||
+ strcmp(token, ">") == 0 ||
strcmp(token, "==") == 0 ||
strcmp(token, "!=") == 0) {
@@ -1144,17 +1227,46 @@ process_op(struct event *event, struct print_arg *arg, char **tok)
right = malloc_or_die(sizeof(*right));
- type = process_arg(event, right, tok);
+ type = read_token_item(&token);
+ *tok = token;
+
+ /* could just be a type pointer */
+ if ((strcmp(arg->op.op, "*") == 0) &&
+ type == EVENT_DELIM && (strcmp(token, ")") == 0)) {
+ if (left->type != PRINT_ATOM)
+ die("bad pointer type");
+ left->atom.atom = realloc(left->atom.atom,
+				  strlen(left->atom.atom) + 3);
+ strcat(left->atom.atom, " *");
+ *arg = *left;
+ free(arg);
+
+ return type;
+ }
+
+ type = process_arg_token(event, right, tok, type);
arg->op.right = right;
+ } else if (strcmp(token, "[") == 0) {
+
+ left = malloc_or_die(sizeof(*left));
+ *left = *arg;
+
+ arg->type = PRINT_OP;
+ arg->op.op = token;
+ arg->op.left = left;
+
+ arg->op.prio = 0;
+ type = process_array(event, arg, tok);
+
} else {
- die("unknown op '%s'", token);
+ warning("unknown op '%s'", token);
+ event->flags |= EVENT_FL_FAILED;
/* the arg is now the left side */
return EVENT_NONE;
}
-
if (type == EVENT_OP) {
int prio;
@@ -1178,7 +1290,7 @@ process_entry(struct event *event __unused, struct print_arg *arg,
char *field;
char *token;
- if (read_expected(EVENT_OP, (char *)"->") < 0)
+ if (read_expected(EVENT_OP, "->") < 0)
return EVENT_ERROR;
if (read_expect_type(EVENT_ITEM, &token) < 0)
@@ -1338,14 +1450,14 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok)
do {
free_token(token);
type = read_token_item(&token);
- if (test_type_token(type, token, EVENT_OP, (char *)"{"))
+ if (test_type_token(type, token, EVENT_OP, "{"))
break;
arg = malloc_or_die(sizeof(*arg));
free_token(token);
type = process_arg(event, arg, &token);
- if (test_type_token(type, token, EVENT_DELIM, (char *)","))
+ if (test_type_token(type, token, EVENT_DELIM, ","))
goto out_free;
field = malloc_or_die(sizeof(*field));
@@ -1356,7 +1468,7 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok)
free_token(token);
type = process_arg(event, arg, &token);
- if (test_type_token(type, token, EVENT_OP, (char *)"}"))
+ if (test_type_token(type, token, EVENT_OP, "}"))
goto out_free;
value = arg_eval(arg);
@@ -1391,13 +1503,13 @@ process_flags(struct event *event, struct print_arg *arg, char **tok)
memset(arg, 0, sizeof(*arg));
arg->type = PRINT_FLAGS;
- if (read_expected_item(EVENT_DELIM, (char *)"(") < 0)
+ if (read_expected_item(EVENT_DELIM, "(") < 0)
return EVENT_ERROR;
field = malloc_or_die(sizeof(*field));
type = process_arg(event, field, &token);
- if (test_type_token(type, token, EVENT_DELIM, (char *)","))
+ if (test_type_token(type, token, EVENT_DELIM, ","))
goto out_free;
arg->flags.field = field;
@@ -1408,11 +1520,11 @@ process_flags(struct event *event, struct print_arg *arg, char **tok)
type = read_token_item(&token);
}
- if (test_type_token(type, token, EVENT_DELIM, (char *)","))
+ if (test_type_token(type, token, EVENT_DELIM, ","))
goto out_free;
type = process_fields(event, &arg->flags.flags, &token);
- if (test_type_token(type, token, EVENT_DELIM, (char *)")"))
+ if (test_type_token(type, token, EVENT_DELIM, ")"))
goto out_free;
free_token(token);
@@ -1434,19 +1546,19 @@ process_symbols(struct event *event, struct print_arg *arg, char **tok)
memset(arg, 0, sizeof(*arg));
arg->type = PRINT_SYMBOL;
- if (read_expected_item(EVENT_DELIM, (char *)"(") < 0)
+ if (read_expected_item(EVENT_DELIM, "(") < 0)
return EVENT_ERROR;
field = malloc_or_die(sizeof(*field));
type = process_arg(event, field, &token);
- if (test_type_token(type, token, EVENT_DELIM, (char *)","))
+ if (test_type_token(type, token, EVENT_DELIM, ","))
goto out_free;
arg->symbol.field = field;
type = process_fields(event, &arg->symbol.symbols, &token);
- if (test_type_token(type, token, EVENT_DELIM, (char *)")"))
+ if (test_type_token(type, token, EVENT_DELIM, ")"))
goto out_free;
free_token(token);
@@ -1463,7 +1575,6 @@ process_paren(struct event *event, struct print_arg *arg, char **tok)
{
struct print_arg *item_arg;
enum event_type type;
- int ptr_cast = 0;
char *token;
type = process_arg(event, arg, &token);
@@ -1471,28 +1582,13 @@ process_paren(struct event *event, struct print_arg *arg, char **tok)
if (type == EVENT_ERROR)
return EVENT_ERROR;
- if (type == EVENT_OP) {
- /* handle the ptr casts */
- if (!strcmp(token, "*")) {
- /*
- * FIXME: should we zapp whitespaces before ')' ?
- * (may require a peek_token_item())
- */
- if (__peek_char() == ')') {
- ptr_cast = 1;
- free_token(token);
- type = read_token_item(&token);
- }
- }
- if (!ptr_cast) {
- type = process_op(event, arg, &token);
+ if (type == EVENT_OP)
+ type = process_op(event, arg, &token);
- if (type == EVENT_ERROR)
- return EVENT_ERROR;
- }
- }
+ if (type == EVENT_ERROR)
+ return EVENT_ERROR;
- if (test_type_token(type, token, EVENT_DELIM, (char *)")")) {
+ if (test_type_token(type, token, EVENT_DELIM, ")")) {
free_token(token);
return EVENT_ERROR;
}
@@ -1516,13 +1612,6 @@ process_paren(struct event *event, struct print_arg *arg, char **tok)
item_arg = malloc_or_die(sizeof(*item_arg));
arg->type = PRINT_TYPE;
- if (ptr_cast) {
- char *old = arg->atom.atom;
-
- arg->atom.atom = malloc_or_die(strlen(old + 3));
- sprintf(arg->atom.atom, "%s *", old);
- free(old);
- }
arg->typecast.type = arg->atom.atom;
arg->typecast.item = item_arg;
type = process_arg_token(event, item_arg, &token, type);
@@ -1540,7 +1629,7 @@ process_str(struct event *event __unused, struct print_arg *arg, char **tok)
enum event_type type;
char *token;
- if (read_expected(EVENT_DELIM, (char *)"(") < 0)
+ if (read_expected(EVENT_DELIM, "(") < 0)
return EVENT_ERROR;
if (read_expect_type(EVENT_ITEM, &token) < 0)
@@ -1550,7 +1639,7 @@ process_str(struct event *event __unused, struct print_arg *arg, char **tok)
arg->string.string = token;
arg->string.offset = -1;
- if (read_expected(EVENT_DELIM, (char *)")") < 0)
+ if (read_expected(EVENT_DELIM, ")") < 0)
return EVENT_ERROR;
type = read_token(&token);
@@ -1637,12 +1726,18 @@ process_arg_token(struct event *event, struct print_arg *arg,
static int event_read_print_args(struct event *event, struct print_arg **list)
{
- enum event_type type;
+ enum event_type type = EVENT_ERROR;
struct print_arg *arg;
char *token;
int args = 0;
do {
+ if (type == EVENT_NEWLINE) {
+ free_token(token);
+ type = read_token_item(&token);
+ continue;
+ }
+
arg = malloc_or_die(sizeof(*arg));
memset(arg, 0, sizeof(*arg));
@@ -1683,18 +1778,19 @@ static int event_read_print(struct event *event)
char *token;
int ret;
- if (read_expected_item(EVENT_ITEM, (char *)"print") < 0)
+ if (read_expected_item(EVENT_ITEM, "print") < 0)
return -1;
- if (read_expected(EVENT_ITEM, (char *)"fmt") < 0)
+ if (read_expected(EVENT_ITEM, "fmt") < 0)
return -1;
- if (read_expected(EVENT_OP, (char *)":") < 0)
+ if (read_expected(EVENT_OP, ":") < 0)
return -1;
if (read_expect_type(EVENT_DQUOTE, &token) < 0)
goto fail;
+ concat:
event->print_fmt.format = token;
event->print_fmt.args = NULL;
@@ -1704,7 +1800,22 @@ static int event_read_print(struct event *event)
if (type == EVENT_NONE)
return 0;
- if (test_type_token(type, token, EVENT_DELIM, (char *)","))
+	/* Handle concatenation of print lines */
+ if (type == EVENT_DQUOTE) {
+ char *cat;
+
+ cat = malloc_or_die(strlen(event->print_fmt.format) +
+ strlen(token) + 1);
+ strcpy(cat, event->print_fmt.format);
+ strcat(cat, token);
+ free_token(token);
+ free_token(event->print_fmt.format);
+ event->print_fmt.format = NULL;
+ token = cat;
+ goto concat;
+ }
+
+ if (test_type_token(type, token, EVENT_DELIM, ","))
goto fail;
free_token(token);
@@ -1713,7 +1824,7 @@ static int event_read_print(struct event *event)
if (ret < 0)
return -1;
- return 0;
+ return ret;
fail:
free_token(token);
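
TP_printk() format strings may be split across several double-quoted pieces, like adjacent C string literals; event_read_print() now glues consecutive EVENT_DQUOTE tokens together before looking for the ',' that starts the argument list. The join step, sketched standalone:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Join two pieces of a format string the way event_read_print() does
     * when it sees a second EVENT_DQUOTE token right after the first. */
    static char *concat_fmt(const char *a, const char *b)
    {
            char *cat = malloc(strlen(a) + strlen(b) + 1);

            if (!cat)
                    return NULL;
            strcpy(cat, a);
            strcat(cat, b);
            return cat;
    }

    int main(void)
    {
            char *fmt = concat_fmt("irq=%d ", "handler=%s");

            printf("%s\n", fmt ? fmt : "(alloc failed)");
            free(fmt);
            return 0;
    }
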
@@ -1822,37 +1933,67 @@ static int get_common_info(const char *type, int *offset, int *size)
return 0;
}
-int trace_parse_common_type(void *data)
+static int __parse_common(void *data, int *size, int *offset,
+ const char *name)
{
- static int type_offset;
- static int type_size;
int ret;
- if (!type_size) {
- ret = get_common_info("common_type",
- &type_offset,
- &type_size);
+ if (!*size) {
+ ret = get_common_info(name, offset, size);
if (ret < 0)
return ret;
}
- return read_size(data + type_offset, type_size);
+ return read_size(data + *offset, *size);
+}
+
+int trace_parse_common_type(void *data)
+{
+ static int type_offset;
+ static int type_size;
+
+ return __parse_common(data, &type_size, &type_offset,
+ "common_type");
}
static int parse_common_pid(void *data)
{
static int pid_offset;
static int pid_size;
+
+ return __parse_common(data, &pid_size, &pid_offset,
+ "common_pid");
+}
+
+static int parse_common_pc(void *data)
+{
+ static int pc_offset;
+ static int pc_size;
+
+ return __parse_common(data, &pc_size, &pc_offset,
+ "common_preempt_count");
+}
+
+static int parse_common_flags(void *data)
+{
+ static int flags_offset;
+ static int flags_size;
+
+ return __parse_common(data, &flags_size, &flags_offset,
+ "common_flags");
+}
+
+static int parse_common_lock_depth(void *data)
+{
+ static int ld_offset;
+ static int ld_size;
int ret;
- if (!pid_size) {
- ret = get_common_info("common_pid",
- &pid_offset,
- &pid_size);
- if (ret < 0)
- return ret;
- }
+ ret = __parse_common(data, &ld_size, &ld_offset,
+ "common_lock_depth");
+ if (ret < 0)
+ return -1;
- return read_size(data + pid_offset, pid_size);
+ return ret;
}
struct event *trace_find_event(int id)
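
__parse_common() keeps the offset/size of each common field in the caller's static variables, so the event format is consulted only once per field for the whole run. A minimal standalone sketch of that memoization, with a stand-in for get_common_info():

    #include <stdio.h>

    /* Stand-in for get_common_info(): pretend parsing the event format
     * costs something and yields an offset/size pair for 'name'. */
    static int lookup_field(const char *name, int *offset, int *size)
    {
            printf("parsing format for %s\n", name);
            *offset = 4;
            *size = 2;
            return 0;
    }

    /* Mirrors __parse_common(): the caller owns the static cache, so each
     * common field is resolved at most once per run. */
    static int parse_common(const void *data, int *size, int *offset,
                            const char *name)
    {
            if (!*size) {
                    if (lookup_field(name, offset, size) < 0)
                            return -1;
            }
            /* A real implementation would read *size bytes at *offset. */
            return *(const unsigned char *)((const char *)data + *offset);
    }

    static int parse_common_pid(const void *data)
    {
            static int pid_offset, pid_size;

            return parse_common(data, &pid_size, &pid_offset, "common_pid");
    }

    int main(void)
    {
            unsigned char record[8] = { 0, 0, 0, 0, 42, 0, 0, 0 };

            printf("pid=%d\n", parse_common_pid(record));
            printf("pid=%d\n", parse_common_pid(record));   /* cached */
            return 0;
    }
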
@@ -1871,6 +2012,7 @@ static unsigned long long eval_num_arg(void *data, int size,
{
unsigned long long val = 0;
unsigned long long left, right;
+ struct print_arg *larg;
switch (arg->type) {
case PRINT_NULL:
@@ -1897,6 +2039,26 @@ static unsigned long long eval_num_arg(void *data, int size,
return 0;
break;
case PRINT_OP:
+ if (strcmp(arg->op.op, "[") == 0) {
+ /*
+ * Arrays are special, since we don't want
+ * to read the arg as is.
+ */
+ if (arg->op.left->type != PRINT_FIELD)
+ goto default_op; /* oops, all bets off */
+ larg = arg->op.left;
+ if (!larg->field.field) {
+ larg->field.field =
+ find_any_field(event, larg->field.name);
+ if (!larg->field.field)
+ die("field %s not found", larg->field.name);
+ }
+ right = eval_num_arg(data, size, event, arg->op.right);
+ val = read_size(data + larg->field.field->offset +
+ right * long_size, long_size);
+ break;
+ }
+ default_op:
left = eval_num_arg(data, size, event, arg->op.left);
right = eval_num_arg(data, size, event, arg->op.right);
switch (arg->op.op[0]) {
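
For a REC->field[idx] argument, the evaluator resolves the field, evaluates the index, and reads one machine word at offset + idx * long_size from the raw payload. A standalone sketch of that read, assuming a long_size of 8 and ignoring endianness:

    #include <stdio.h>
    #include <string.h>

    /* Read element 'idx' of an array field that starts at 'offset' in the
     * raw event payload; element width follows long_size (8 assumed here).
     * Endianness handling is left out to keep the sketch short. */
    static unsigned long long read_array_elem(const void *data, int offset,
                                              unsigned long long idx,
                                              int long_size)
    {
            unsigned long long val = 0;

            memcpy(&val, (const char *)data + offset + idx * long_size,
                   long_size);
            return val;
    }

    int main(void)
    {
            unsigned long long payload[4] = { 1, 2, 3, 4 };

            /* array field at byte offset 8, element 1 -> payload[2] == 3 */
            printf("%llu\n", read_array_elem(payload, 8, 1, 8));
            return 0;
    }
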
@@ -1947,6 +2109,12 @@ static unsigned long long eval_num_arg(void *data, int size,
die("unknown op '%s'", arg->op.op);
val = left == right;
break;
+ case '-':
+ val = left - right;
+ break;
+ case '+':
+ val = left + right;
+ break;
default:
die("unknown op '%s'", arg->op.op);
}
@@ -2145,8 +2313,9 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
case 'u':
case 'x':
case 'i':
- bptr = (void *)(((unsigned long)bptr + (long_size - 1)) &
- ~(long_size - 1));
+			/* the pointers are always 4-byte aligned */
+ bptr = (void *)(((unsigned long)bptr + 3) &
+ ~3);
switch (ls) {
case 0:
case 1:
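
The bprintk argument pointer is now rounded up to a 4-byte boundary regardless of the target word size. The expression used is the usual power-of-two align-up, (p + align - 1) & ~(align - 1); a standalone sketch:

    #include <stdio.h>

    /* Round 'p' up to the next multiple of 'align' (align must be a power
     * of two), as done for the 4-byte aligned bprintk arguments above. */
    static unsigned long align_up(unsigned long p, unsigned long align)
    {
            return (p + align - 1) & ~(align - 1);
    }

    int main(void)
    {
            printf("%lu %lu %lu\n",
                   align_up(12, 4),    /* already aligned -> 12 */
                   align_up(13, 4),    /* -> 16 */
                   align_up(15, 4));   /* -> 16 */
            return 0;
    }
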
@@ -2270,7 +2439,27 @@ static void pretty_print(void *data, int size, struct event *event)
for (; *ptr; ptr++) {
ls = 0;
- if (*ptr == '%') {
+ if (*ptr == '\\') {
+ ptr++;
+ switch (*ptr) {
+ case 'n':
+ printf("\n");
+ break;
+ case 't':
+ printf("\t");
+ break;
+ case 'r':
+ printf("\r");
+ break;
+ case '\\':
+ printf("\\");
+ break;
+ default:
+ printf("%c", *ptr);
+ break;
+ }
+
+ } else if (*ptr == '%') {
saveptr = ptr;
show_func = 0;
cont_process:
@@ -2377,6 +2566,41 @@ static inline int log10_cpu(int nb)
return 1;
}
+static void print_lat_fmt(void *data, int size __unused)
+{
+ unsigned int lat_flags;
+ unsigned int pc;
+ int lock_depth;
+ int hardirq;
+ int softirq;
+
+ lat_flags = parse_common_flags(data);
+ pc = parse_common_pc(data);
+ lock_depth = parse_common_lock_depth(data);
+
+ hardirq = lat_flags & TRACE_FLAG_HARDIRQ;
+ softirq = lat_flags & TRACE_FLAG_SOFTIRQ;
+
+ printf("%c%c%c",
+ (lat_flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+ (lat_flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
+ 'X' : '.',
+ (lat_flags & TRACE_FLAG_NEED_RESCHED) ?
+ 'N' : '.',
+ (hardirq && softirq) ? 'H' :
+ hardirq ? 'h' : softirq ? 's' : '.');
+
+ if (pc)
+ printf("%x", pc);
+ else
+ printf(".");
+
+ if (lock_depth < 0)
+ printf(".");
+ else
+ printf("%d", lock_depth);
+}
+
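
print_lat_fmt() condenses common_flags, the preempt count and the lock depth into the short latency-format column known from ftrace ('d'/'X'/'.', 'N'/'.', 'H'/'h'/'s'/'.'). A standalone sketch using the trace_flag_type values added to trace-event.h further down in this patch:

    #include <stdio.h>

    /* Values match the trace_flag_type enum added to trace-event.h below. */
    #define TRACE_FLAG_IRQS_OFF        0x01
    #define TRACE_FLAG_IRQS_NOSUPPORT  0x02
    #define TRACE_FLAG_NEED_RESCHED    0x04
    #define TRACE_FLAG_HARDIRQ         0x08
    #define TRACE_FLAG_SOFTIRQ         0x10

    /* Condense flags/pc/lock_depth into the latency-format column, in the
     * same spirit as print_lat_fmt() above. */
    static void print_lat(unsigned int flags, unsigned int pc, int lock_depth)
    {
            int hardirq = flags & TRACE_FLAG_HARDIRQ;
            int softirq = flags & TRACE_FLAG_SOFTIRQ;

            printf("%c%c%c",
                   (flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
                   (flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
                   (flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.',
                   (hardirq && softirq) ? 'H' :
                   hardirq ? 'h' : softirq ? 's' : '.');

            if (pc)
                    printf("%x", pc);
            else
                    printf(".");

            if (lock_depth < 0)
                    printf(".");
            else
                    printf("%d", lock_depth);
            printf("\n");
    }

    int main(void)
    {
            print_lat(TRACE_FLAG_IRQS_OFF | TRACE_FLAG_SOFTIRQ, 1, -1);
            return 0;
    }
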
/* taken from Linux, written by Frederic Weisbecker */
static void print_graph_cpu(int cpu)
{
@@ -2620,6 +2844,11 @@ pretty_print_func_ent(void *data, int size, struct event *event,
printf(" | ");
+ if (latency_format) {
+ print_lat_fmt(data, size);
+ printf(" | ");
+ }
+
field = find_field(event, "func");
if (!field)
die("function entry does not have func field");
@@ -2663,6 +2892,11 @@ pretty_print_func_ret(void *data, int size __unused, struct event *event,
printf(" | ");
+ if (latency_format) {
+ print_lat_fmt(data, size);
+ printf(" | ");
+ }
+
field = find_field(event, "rettime");
if (!field)
die("can't find rettime in return graph");
@@ -2724,7 +2958,7 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs,
event = trace_find_event(type);
if (!event) {
- printf("ug! no event found for type %d\n", type);
+ warning("ug! no event found for type %d", type);
return;
}
@@ -2734,9 +2968,20 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs,
return pretty_print_func_graph(data, size, event, cpu,
pid, comm, secs, usecs);
- printf("%16s-%-5d [%03d] %5lu.%09Lu: %s: ",
- comm, pid, cpu,
- secs, nsecs, event->name);
+ if (latency_format) {
+ printf("%8.8s-%-5d %3d",
+ comm, pid, cpu);
+ print_lat_fmt(data, size);
+ } else
+ printf("%16s-%-5d [%03d]", comm, pid, cpu);
+
+ printf(" %5lu.%06lu: %s: ", secs, usecs, event->name);
+
+ if (event->flags & EVENT_FL_FAILED) {
+ printf("EVENT '%s' FAILED TO PARSE\n",
+ event->name);
+ return;
+ }
pretty_print(data, size, event);
printf("\n");
@@ -2807,46 +3052,71 @@ static void print_args(struct print_arg *args)
}
}
-static void parse_header_field(char *type,
+static void parse_header_field(const char *field,
int *offset, int *size)
{
char *token;
+ int type;
- if (read_expected(EVENT_ITEM, (char *)"field") < 0)
+ if (read_expected(EVENT_ITEM, "field") < 0)
return;
- if (read_expected(EVENT_OP, (char *)":") < 0)
+ if (read_expected(EVENT_OP, ":") < 0)
return;
+
/* type */
if (read_expect_type(EVENT_ITEM, &token) < 0)
- return;
+ goto fail;
free_token(token);
- if (read_expected(EVENT_ITEM, type) < 0)
+ if (read_expected(EVENT_ITEM, field) < 0)
return;
- if (read_expected(EVENT_OP, (char *)";") < 0)
+ if (read_expected(EVENT_OP, ";") < 0)
return;
- if (read_expected(EVENT_ITEM, (char *)"offset") < 0)
+ if (read_expected(EVENT_ITEM, "offset") < 0)
return;
- if (read_expected(EVENT_OP, (char *)":") < 0)
+ if (read_expected(EVENT_OP, ":") < 0)
return;
if (read_expect_type(EVENT_ITEM, &token) < 0)
- return;
+ goto fail;
*offset = atoi(token);
free_token(token);
- if (read_expected(EVENT_OP, (char *)";") < 0)
+ if (read_expected(EVENT_OP, ";") < 0)
return;
- if (read_expected(EVENT_ITEM, (char *)"size") < 0)
+ if (read_expected(EVENT_ITEM, "size") < 0)
return;
- if (read_expected(EVENT_OP, (char *)":") < 0)
+ if (read_expected(EVENT_OP, ":") < 0)
return;
if (read_expect_type(EVENT_ITEM, &token) < 0)
- return;
+ goto fail;
*size = atoi(token);
free_token(token);
- if (read_expected(EVENT_OP, (char *)";") < 0)
- return;
- if (read_expect_type(EVENT_NEWLINE, &token) < 0)
+ if (read_expected(EVENT_OP, ";") < 0)
return;
+ type = read_token(&token);
+ if (type != EVENT_NEWLINE) {
+ /* newer versions of the kernel have a "signed" type */
+ if (type != EVENT_ITEM)
+ goto fail;
+
+ if (strcmp(token, "signed") != 0)
+ goto fail;
+
+ free_token(token);
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ return;
+
+ if (read_expect_type(EVENT_ITEM, &token))
+ goto fail;
+
+ free_token(token);
+ if (read_expected(EVENT_OP, ";") < 0)
+ return;
+
+ if (read_expect_type(EVENT_NEWLINE, &token))
+ goto fail;
+ }
+ fail:
free_token(token);
}
@@ -2854,11 +3124,11 @@ int parse_header_page(char *buf, unsigned long size)
{
init_input_buf(buf, size);
- parse_header_field((char *)"timestamp", &header_page_ts_offset,
+ parse_header_field("timestamp", &header_page_ts_offset,
&header_page_ts_size);
- parse_header_field((char *)"commit", &header_page_size_offset,
+ parse_header_field("commit", &header_page_size_offset,
&header_page_size_size);
- parse_header_field((char *)"data", &header_page_data_offset,
+ parse_header_field("data", &header_page_data_offset,
&header_page_data_size);
return 0;
@@ -2909,6 +3179,9 @@ int parse_ftrace_file(char *buf, unsigned long size)
if (ret < 0)
die("failed to read ftrace event print fmt");
+ /* New ftrace handles args */
+ if (ret > 0)
+ return 0;
/*
* The arguments for ftrace files are parsed by the fields.
* Set up the fields as their arguments.
@@ -2926,7 +3199,7 @@ int parse_ftrace_file(char *buf, unsigned long size)
return 0;
}
-int parse_event_file(char *buf, unsigned long size, char *system__unused __unused)
+int parse_event_file(char *buf, unsigned long size, char *sys)
{
struct event *event;
int ret;
@@ -2946,12 +3219,18 @@ int parse_event_file(char *buf, unsigned long size, char *system__unused __unuse
die("failed to read event id");
ret = event_read_format(event);
- if (ret < 0)
- die("failed to read event format");
+ if (ret < 0) {
+ warning("failed to read event format for %s", event->name);
+ goto event_failed;
+ }
ret = event_read_print(event);
- if (ret < 0)
- die("failed to read event print fmt");
+ if (ret < 0) {
+ warning("failed to read event print fmt for %s", event->name);
+ goto event_failed;
+ }
+
+ event->system = strdup(sys);
#define PRINT_ARGS 0
if (PRINT_ARGS && event->print_fmt.args)
@@ -2959,6 +3238,12 @@ int parse_event_file(char *buf, unsigned long size, char *system__unused __unuse
add_event(event);
return 0;
+
+ event_failed:
+ event->flags |= EVENT_FL_FAILED;
+ /* still add it even if it failed */
+ add_event(event);
+ return -1;
}
void parse_set_info(int nr_cpus, int long_sz)
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 1b5c847d2c22..44292e06cca4 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -458,9 +458,8 @@ struct record *trace_read_data(int cpu)
return data;
}
-void trace_report(void)
+void trace_report(int fd)
{
- const char *input_file = "trace.info";
char buf[BUFSIZ];
char test[] = { 23, 8, 68 };
char *version;
@@ -468,9 +467,7 @@ void trace_report(void)
int show_funcs = 0;
int show_printk = 0;
- input_fd = open(input_file, O_RDONLY);
- if (input_fd < 0)
- die("opening '%s'\n", input_file);
+ input_fd = fd;
read_or_die(buf, 3);
if (memcmp(buf, test, 3) != 0)
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 693f815c9429..f6637c2fa1fe 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -1,5 +1,5 @@
-#ifndef _TRACE_EVENTS_H
-#define _TRACE_EVENTS_H
+#ifndef __PERF_TRACE_EVENTS_H
+#define __PERF_TRACE_EVENTS_H
#include "parse-events.h"
@@ -26,6 +26,9 @@ enum {
enum format_flags {
FIELD_IS_ARRAY = 1,
FIELD_IS_POINTER = 2,
+ FIELD_IS_SIGNED = 4,
+ FIELD_IS_STRING = 8,
+ FIELD_IS_DYNAMIC = 16,
};
struct format_field {
@@ -132,15 +135,18 @@ struct event {
int flags;
struct format format;
struct print_fmt print_fmt;
+ char *system;
};
enum {
- EVENT_FL_ISFTRACE = 1,
- EVENT_FL_ISPRINT = 2,
- EVENT_FL_ISBPRINT = 4,
- EVENT_FL_ISFUNC = 8,
- EVENT_FL_ISFUNCENT = 16,
- EVENT_FL_ISFUNCRET = 32,
+ EVENT_FL_ISFTRACE = 0x01,
+ EVENT_FL_ISPRINT = 0x02,
+ EVENT_FL_ISBPRINT = 0x04,
+ EVENT_FL_ISFUNC = 0x08,
+ EVENT_FL_ISFUNCENT = 0x10,
+ EVENT_FL_ISFUNCRET = 0x20,
+
+ EVENT_FL_FAILED = 0x80000000
};
struct record {
@@ -154,7 +160,7 @@ struct record *trace_read_data(int cpu);
void parse_set_info(int nr_cpus, int long_sz);
-void trace_report(void);
+void trace_report(int fd);
void *malloc_or_die(unsigned int size);
@@ -166,7 +172,7 @@ void print_funcs(void);
void print_printk(void);
int parse_ftrace_file(char *buf, unsigned long size);
-int parse_event_file(char *buf, unsigned long size, char *system);
+int parse_event_file(char *buf, unsigned long size, char *sys);
void print_event(int cpu, void *data, int size, unsigned long long nsecs,
char *comm);
@@ -233,6 +239,8 @@ extern int header_page_size_size;
extern int header_page_data_offset;
extern int header_page_data_size;
+extern int latency_format;
+
int parse_header_page(char *buf, unsigned long size);
int trace_parse_common_type(void *data);
struct event *trace_find_event(int id);
@@ -240,6 +248,15 @@ unsigned long long
raw_field_value(struct event *event, const char *name, void *data);
void *raw_field_ptr(struct event *event, const char *name, void *data);
-void read_tracing_data(struct perf_event_attr *pattrs, int nb_events);
+void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events);
+
+/* taken from kernel/trace/trace.h */
+enum trace_flag_type {
+ TRACE_FLAG_IRQS_OFF = 0x01,
+ TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
+ TRACE_FLAG_NEED_RESCHED = 0x04,
+ TRACE_FLAG_HARDIRQ = 0x08,
+ TRACE_FLAG_SOFTIRQ = 0x10,
+};
-#endif /* _TRACE_EVENTS_H */
+#endif /* __PERF_TRACE_EVENTS_H */
diff --git a/tools/perf/util/types.h b/tools/perf/util/types.h
index 5e75f9005940..7d6b8331f898 100644
--- a/tools/perf/util/types.h
+++ b/tools/perf/util/types.h
@@ -1,5 +1,5 @@
-#ifndef _PERF_TYPES_H
-#define _PERF_TYPES_H
+#ifndef __PERF_TYPES_H
+#define __PERF_TYPES_H
/*
* We define u64 as unsigned long long for every architecture
@@ -14,4 +14,4 @@ typedef signed short s16;
typedef unsigned char u8;
typedef signed char s8;
-#endif /* _PERF_TYPES_H */
+#endif /* __PERF_TYPES_H */
diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h
index cadf8cf2a590..2fa967e1a88a 100644
--- a/tools/perf/util/values.h
+++ b/tools/perf/util/values.h
@@ -1,5 +1,5 @@
-#ifndef _PERF_VALUES_H
-#define _PERF_VALUES_H
+#ifndef __PERF_VALUES_H
+#define __PERF_VALUES_H
#include "types.h"
@@ -24,4 +24,4 @@ void perf_read_values_add_value(struct perf_read_values *values,
void perf_read_values_display(FILE *fp, struct perf_read_values *values,
int raw);
-#endif /* _PERF_VALUES_H */
+#endif /* __PERF_VALUES_H */