path: root/include/linux/nmi.h
author	Ingo Molnar <mingo@elte.hu>	2009-08-03 09:31:54 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-08-03 09:56:52 +0200
commit	47cab6a722d44c71c4f8224017ef548522243cf4 (patch)
tree	477bbe67a203d9704792d6ce46a9f1199587ddfe /include/linux/nmi.h
parent	c1dc0b9c0c8979ce4d411caadff5c0d79dee58bc (diff)
debug lockups: Improve lockup detection, fix generic arch fallback
As Andrew noted, my previous patch ("debug lockups: Improve lockup
detection") broke/removed SysRq-L support from architectures that do
not provide a __trigger_all_cpu_backtrace implementation.

Restore a fallback path and clean up the SysRq-L machinery a bit:

 - Rename the arch method to arch_trigger_all_cpu_backtrace()

 - Simplify the define

 - Document the method a bit - in the hope of more architectures
   adding support for it

[ The patch touches Sparc code for the rename. ]

Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "David S. Miller" <davem@davemloft.net>
LKML-Reference: <20090802140809.7ec4bb6b.akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
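The bool return value is the point of the cleanup: a caller can attempt the NMI-based all-CPU backtrace and, on an architecture without arch_trigger_all_cpu_backtrace(), fall back to some other reporting mechanism. A minimal caller sketch follows; it is illustrative only - the function name is hypothetical, and show_state() stands in for whatever fallback a real caller (such as the SysRq-L handler) would choose:

/*
 * Hypothetical caller - not part of this patch. Demonstrates using
 * the bool return value to fall back when the architecture provides
 * no NMI-based all-CPU backtrace.
 */
#include <linux/nmi.h>
#include <linux/sched.h>

static void showallcpus_sketch(void)
{
	/* Try the arch-provided NMI backtrace first... */
	if (!trigger_all_cpu_backtrace())
		/* ...no arch support: dump task states instead. */
		show_state();
}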
Diffstat (limited to 'include/linux/nmi.h')
-rw-r--r--	include/linux/nmi.h	19 +++++++++++++++++--
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 29af2d5df097..b752e807adde 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -28,8 +28,23 @@ static inline void acpi_nmi_disable(void) { }
 static inline void acpi_nmi_enable(void) { }
 #endif
 
-#ifndef trigger_all_cpu_backtrace
-#define trigger_all_cpu_backtrace() do { } while (0)
+/*
+ * Create trigger_all_cpu_backtrace() out of the arch-provided
+ * base function. Return whether such support was available,
+ * to allow calling code to fall back to some other mechanism:
+ */
+#ifdef arch_trigger_all_cpu_backtrace
+static inline bool trigger_all_cpu_backtrace(void)
+{
+	arch_trigger_all_cpu_backtrace();
+
+	return true;
+}
+#else
+static inline bool trigger_all_cpu_backtrace(void)
+{
+	return false;
+}
 #endif
 
 #endif
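An architecture opts in by making arch_trigger_all_cpu_backtrace visible as a preprocessor symbol, so the #ifdef above selects the arch-backed wrapper. A hedged sketch of the pattern, modeled on the x86 wiring in this series - treat the exact placement as illustrative:

/* In the architecture's <asm/nmi.h>: declare the function and define
 * the macro to itself, so that "#ifdef arch_trigger_all_cpu_backtrace"
 * in <linux/nmi.h> picks the real implementation. */
void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace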