author     Petr Mladek <pmladek@suse.com>                    2021-04-29 22:54:33 -0700
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2021-05-19 10:13:00 +0200
commit     5b66867966bc04652d85d58b8500a22b99aa5dad (patch)
tree       a246b85a77a32ec788e653fbdb87ee9f2b6bc94f /kernel/watchdog.c
parent     a68c246065b6042bfdb9177527fcf0e8f93dff3d (diff)
watchdog: fix barriers when printing backtraces from all CPUs
[ Upstream commit 9f113bf760ca90d709f8f89a733d10abb1f04a83 ]

Any parallel softlockup reports are skipped when one CPU is already
printing backtraces from all CPUs.

The exclusive rights are synchronized using one bit in
soft_lockup_nmi_warn. There is also one memory barrier that does not
make much sense.

Use two barriers on the right location to prevent mixing two reports.

[pmladek@suse.com: use bit lock operations to prevent multiple soft-lockup reports]
  Link: https://lkml.kernel.org/r/YFSVsLGVWMXTvlbk@alley
Link: https://lkml.kernel.org/r/20210311122130.6788-6-pmladek@suse.com
Signed-off-by: Petr Mladek <pmladek@suse.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Laurence Oberman <loberman@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincent Whitchurch <vincent.whitchurch@axis.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
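The patch pairs test_and_set_bit_lock() (acquire) with clear_bit_unlock() (release) so that everything the reporting CPU prints is ordered against the bit that hands off the exclusive reporting right. A minimal userspace sketch of that acquire/release bit-lock pattern, written with C11 atomics rather than the kernel's bitops, follows; the names report_in_progress, try_begin_report() and end_report() are illustrative stand-ins and not part of the patch.

/*
 * Sketch of the acquire/release pattern used by the patch, expressed
 * with C11 atomics instead of test_and_set_bit_lock()/clear_bit_unlock().
 * All identifiers here are hypothetical examples.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag report_in_progress = ATOMIC_FLAG_INIT;

/* Analogue of test_and_set_bit_lock(): acquire semantics on success. */
static bool try_begin_report(void)
{
	return !atomic_flag_test_and_set_explicit(&report_in_progress,
						  memory_order_acquire);
}

/* Analogue of clear_bit_unlock(): release semantics. */
static void end_report(void)
{
	atomic_flag_clear_explicit(&report_in_progress, memory_order_release);
}

int main(void)
{
	if (!try_begin_report()) {
		/* Another CPU/thread already owns the report; skip ours. */
		return 0;
	}

	/*
	 * Everything printed here happens after the acquire and before
	 * the release, so two reports can never interleave.
	 */
	puts("dumping backtraces");

	end_report();
	return 0;
}

The old code's lone smp_mb__after_atomic() after clear_bit() did not give this pairing, which is why the commit message calls it out as not making much sense.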
Diffstat (limited to 'kernel/watchdog.c')
-rw-r--r--  kernel/watchdog.c | 17
1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 122e272ad7f2..01bf977090dc 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -393,11 +393,12 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 		if (kvm_check_and_clear_guest_paused())
 			return HRTIMER_RESTART;
 
+		/*
+		 * Prevent multiple soft-lockup reports if one cpu is already
+		 * engaged in dumping all cpu back traces.
+		 */
 		if (softlockup_all_cpu_backtrace) {
-			/* Prevent multiple soft-lockup reports if one cpu is already
-			 * engaged in dumping cpu back traces
-			 */
-			if (test_and_set_bit(0, &soft_lockup_nmi_warn))
+			if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
 				return HRTIMER_RESTART;
 		}
 
@@ -415,14 +416,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 		dump_stack();
 
 		if (softlockup_all_cpu_backtrace) {
-			/* Avoid generating two back traces for current
-			 * given that one is already made above
-			 */
 			trigger_allbutself_cpu_backtrace();
-
-			clear_bit(0, &soft_lockup_nmi_warn);
-			/* Barrier to sync with other cpus */
-			smp_mb__after_atomic();
+			clear_bit_unlock(0, &soft_lockup_nmi_warn);
 		}
 
 		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);