author     Ingo Molnar <mingo@elte.hu>            2008-05-12 21:20:47 +0200
committer  Thomas Gleixner <tglx@linutronix.de>   2008-05-23 23:39:13 +0200
commit     0075fa80305f3231a2d5df97b00d7f55a48ea27e (patch)
tree       45be2ca5fcf325cbf4fa7a63ab09d8cac0363a3f
parent     f06c38103ea9dbca27c3f4d77f444ddefb5477cd (diff)
ftrace: extend sysprof plugin
add per CPU hrtimers.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
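For readability, the core sampling mechanism the patch introduces can be condensed into the sketch below. It is only illustrative: it restates the hunks from the diff that follows rather than adding anything new, the callback body is the same panic_timeout++ placeholder the patch uses until real stack-trace capture is wired in, and the hrtimer API calls (hrtimer_init(), hrtimer_forward_now(), hrtimer_start(), assigning hrtimer->function) are taken directly from the patch as it stood in 2008.

```c
#include <linux/hrtimer.h>
#include <linux/kernel.h>	/* panic_timeout */
#include <linux/ktime.h>
#include <linux/percpu.h>

/* Sample period in nanoseconds (1 ms), as in the patch. */
static const unsigned long sample_period = 1000000;

/* One profiling hrtimer per CPU. */
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);

/*
 * Timer callback: fires every sample_period ns on its own CPU
 * and re-arms itself.
 */
static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
	/* Placeholder for the actual stack capture. */
	panic_timeout++;

	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
	return HRTIMER_RESTART;
}

/* Arm the per-CPU timer; must be called while running on that CPU. */
static void start_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = stack_trace_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
}
```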
-rw-r--r--   kernel/trace/trace_sysprof.c   67
1 file changed, 63 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 6c139bc1be7e..ba55b871b3d9 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -5,19 +5,76 @@
  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  *
  */
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
 #include <linux/kallsyms.h>
+#include <linux/debugfs.h>
+#include <linux/hrtimer.h>
 #include <linux/uaccess.h>
-#include <linux/marker.h>
 #include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/fs.h>
 
 #include "trace.h"
 
 static struct trace_array	*ctx_trace;
 static int __read_mostly	tracer_enabled;
 
+static const unsigned long sample_period = 1000000;
+
+/*
+ * Per CPU hrtimers that do the profiling:
+ */
+static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);
+
+static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
+{
+	/* trace here */
+	panic_timeout++;
+
+	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
+
+	return HRTIMER_RESTART;
+}
+
+static void start_stack_timer(int cpu)
+{
+	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
+
+	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hrtimer->function = stack_trace_timer_fn;
+	hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+
+	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
+}
+
+static void start_stack_timers(void)
+{
+	cpumask_t saved_mask = current->cpus_allowed;
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+		start_stack_timer(cpu);
+		printk("started timer on cpu%d\n", cpu);
+	}
+	set_cpus_allowed_ptr(current, &saved_mask);
+}
+
+static void stop_stack_timer(int cpu)
+{
+	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
+
+	hrtimer_cancel(hrtimer);
+	printk("cancelled timer on cpu%d\n", cpu);
+}
+
+static void stop_stack_timers(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		stop_stack_timer(cpu);
+}
+
 static notrace void stack_reset(struct trace_array *tr)
 {
 	int cpu;
@@ -31,11 +88,13 @@ static notrace void stack_reset(struct trace_array *tr)
 static notrace void start_stack_trace(struct trace_array *tr)
 {
 	stack_reset(tr);
+	start_stack_timers();
 	tracer_enabled = 1;
 }
 
 static notrace void stop_stack_trace(struct trace_array *tr)
 {
+	stop_stack_timers();
 	tracer_enabled = 0;
 }
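A note on the design, as far as it can be read from the patch itself: hrtimers are queued on the CPU they are started from, so start_stack_timers() temporarily migrates the current task to each online CPU with set_cpus_allowed_ptr() before arming that CPU's timer, then restores the saved affinity mask. The HRTIMER_CB_IRQSAFE_NO_SOFTIRQ callback mode (part of the hrtimer API of this era) makes the callback run directly from hard interrupt context instead of being deferred to a softirq, which is what a profiling tick wants. The panic_timeout++ next to the "trace here" comment and the plain printk() calls look like placeholders and debug output for this intermediate step, with the actual per-sample stack capture presumably left to a follow-up change.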