author		Steven Rostedt <srostedt@redhat.com>	2009-02-06 01:45:16 -0500
committer	Steven Rostedt <srostedt@redhat.com>	2009-02-07 20:03:33 -0500
commit		a81bd80a0b0a405dc0483e2c428332d69da2c79f (patch)
tree		8543662a0ad8199bde641c1fd4a42929d684ffaf
parent		9a5fd902273d01170fd033691bd70b142baa7309 (diff)
ring-buffer: use generic version of in_nmi
Impact: clean up

Now that a generic in_nmi is available, this patch removes the special
code in the ring_buffer and implements the in_nmi generic version
instead.

With this change, I was also able to rename the "arch_ftrace_nmi_enter"
back to "ftrace_nmi_enter" and remove the code from the ring buffer.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
-rw-r--r--	arch/x86/kernel/ftrace.c	 4
-rw-r--r--	include/linux/ftrace_irq.h	 8
-rw-r--r--	kernel/trace/ring_buffer.c	43
3 files changed, 15 insertions, 40 deletions
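For readers coming to this patch cold: the generic in_nmi() derives "are we in an NMI?" from the preempt count, which the common nmi_enter()/nmi_exit() paths mark on every NMI, so no tracer-private bookkeeping is needed. The fragment below is a simplified sketch of that scheme, not the literal <linux/hardirq.h> of this tree; the NMI_OFFSET name, the bit chosen, and the exact nmi_enter()/nmi_exit() bodies are assumptions made for illustration.

/*
 * Simplified sketch of the generic NMI tracking behind in_nmi().
 * Illustrative only: the real <linux/hardirq.h> may differ in names,
 * bit layout and ordering.
 */
#define NMI_OFFSET	(1UL << 20)	/* assumed: a dedicated bit in preempt_count() */
#define in_nmi()	(preempt_count() & NMI_OFFSET)

#define nmi_enter()						\
	do {							\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());	/* NMIs do not nest */	\
		add_preempt_count(NMI_OFFSET);			\
	} while (0)

#define nmi_exit()						\
	do {							\
		sub_preempt_count(NMI_OFFSET);			\
		WARN_ON_ONCE(in_nmi());				\
		ftrace_nmi_exit();				\
	} while (0)

Because the NMI bit is set in preempt_count() before the handler body runs, a plain in_nmi() test inside the ring buffer is enough to replace the per-CPU rb_in_nmi counter removed below.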
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 918073c6681b..d74d75e0952d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -113,7 +113,7 @@ static void ftrace_mod_code(void)
 					     MCOUNT_INSN_SIZE);
 }
 
-void arch_ftrace_nmi_enter(void)
+void ftrace_nmi_enter(void)
 {
 	atomic_inc(&nmi_running);
 	/* Must have nmi_running seen before reading write flag */
@@ -124,7 +124,7 @@ void arch_ftrace_nmi_enter(void)
 	}
 }
 
-void arch_ftrace_nmi_exit(void)
+void ftrace_nmi_exit(void)
 {
 	/* Finish all executions before clearing nmi_running */
 	smp_wmb();
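Taken together, the two hunks above only drop the arch_ prefix: with the ring buffer's wrapper gone, the x86 code-patching protection is now the generic ftrace_nmi_enter()/ftrace_nmi_exit() itself. Pieced together from the hunk context, the renamed handlers end up roughly as below; the smp_mb(), the mod_code_write test and the closing atomic_dec() fall outside the lines shown and are reconstructed, not quoted.

void ftrace_nmi_enter(void)
{
	atomic_inc(&nmi_running);
	/* Must have nmi_running seen before reading write flag */
	smp_mb();			/* reconstructed from context */
	if (mod_code_write) {		/* reconstructed: a text patch is pending */
		/* do the pending modification on behalf of the patcher */
		ftrace_mod_code();
	}
}

void ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing nmi_running */
	smp_wmb();
	atomic_dec(&nmi_running);	/* reconstructed: drop our NMI reference */
}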
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 29de6779a963..dca7bf8cffe2 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -3,14 +3,6 @@
 #ifdef CONFIG_FTRACE_NMI_ENTER
-extern void arch_ftrace_nmi_enter(void);
-extern void arch_ftrace_nmi_exit(void);
-#else
-static inline void arch_ftrace_nmi_enter(void) { }
-static inline void arch_ftrace_nmi_exit(void) { }
-#endif
-
-#ifdef CONFIG_RING_BUFFER
 extern void ftrace_nmi_enter(void);
 extern void ftrace_nmi_exit(void);
 #else
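After this hunk the header is down to a single conditional: an architecture that selects CONFIG_FTRACE_NMI_ENTER (such as the x86 code patched above) supplies the two functions, everyone else gets empty inline stubs. Reconstructed from the hunk, the resulting include/linux/ftrace_irq.h should read roughly as follows; the include-guard name and the stub bodies are assumed from the usual pattern rather than quoted from the tree.

#ifndef _LINUX_FTRACE_IRQ_H
#define _LINUX_FTRACE_IRQ_H

#ifdef CONFIG_FTRACE_NMI_ENTER
extern void ftrace_nmi_enter(void);
extern void ftrace_nmi_exit(void);
#else
static inline void ftrace_nmi_enter(void) { }
static inline void ftrace_nmi_exit(void) { }
#endif

#endif /* _LINUX_FTRACE_IRQ_H */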
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a60a6a852f42..5ee344417cd5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -8,6 +8,7 @@
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
+#include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
@@ -20,35 +21,6 @@
 #include "trace.h"
 
 /*
- * Since the write to the buffer is still not fully lockless,
- * we must be careful with NMIs. The locks in the writers
- * are taken when a write crosses to a new page. The locks
- * protect against races with the readers (this will soon
- * be fixed with a lockless solution).
- *
- * Because we can not protect against NMIs, and we want to
- * keep traces reentrant, we need to manage what happens
- * when we are in an NMI.
- */
-static DEFINE_PER_CPU(int, rb_in_nmi);
-
-void ftrace_nmi_enter(void)
-{
-	__get_cpu_var(rb_in_nmi)++;
-	/* call arch specific handler too */
-	arch_ftrace_nmi_enter();
-}
-
-void ftrace_nmi_exit(void)
-{
-	arch_ftrace_nmi_exit();
-	__get_cpu_var(rb_in_nmi)--;
-	/* NMIs are not recursive */
-	WARN_ON_ONCE(__get_cpu_var(rb_in_nmi));
-}
-
-
-/*
  * A fast way to enable or disable all ring buffers is to
  * call tracing_on or tracing_off. Turning off the ring buffers
  * prevents all ring buffers from being recorded to.
@@ -1027,12 +999,23 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		local_irq_save(flags);
 		/*
+		 * Since the write to the buffer is still not
+		 * fully lockless, we must be careful with NMIs.
+		 * The locks in the writers are taken when a write
+		 * crosses to a new page. The locks protect against
+		 * races with the readers (this will soon be fixed
+		 * with a lockless solution).
+		 *
+		 * Because we can not protect against NMIs, and we
+		 * want to keep traces reentrant, we need to manage
+		 * what happens when we are in an NMI.
+		 *
 		 * NMIs can happen after we take the lock.
 		 * If we are in an NMI, only take the lock
 		 * if it is not already taken. Otherwise
 		 * simply fail.
 		 */
-		if (unlikely(__get_cpu_var(rb_in_nmi))) {
+		if (unlikely(in_nmi())) {
 			if (!__raw_spin_trylock(&cpu_buffer->lock))
 				goto out_unlock;
 		} else
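The hunk above is cut off at the "} else" line; after this patch the reserve path in __rb_reserve_next() follows the usual "trylock in NMI, plain lock otherwise" shape. For readability it is restated in full below; the final __raw_spin_lock() is the assumed else arm, reconstructed rather than copied from the hunk.

	local_irq_save(flags);

	/*
	 * An NMI can fire while this CPU holds cpu_buffer->lock.
	 * If this write is itself running in NMI context, only try
	 * to take the lock; if the interrupted code on this CPU
	 * already holds it, give up on the event rather than
	 * deadlock.
	 */
	if (unlikely(in_nmi())) {
		if (!__raw_spin_trylock(&cpu_buffer->lock))
			goto out_unlock;
	} else
		__raw_spin_lock(&cpu_buffer->lock);	/* assumed else arm */

This is the payoff of the switch: the test that chooses between the trylock and the blocking lock now reads generic NMI state instead of a ring-buffer-private counter that had to be maintained from ftrace_nmi_enter()/ftrace_nmi_exit().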