path: root/kernel/watchdog.c
author    Thomas Gleixner <tglx@linutronix.de>  2017-09-12 21:37:04 +0200
committer Ingo Molnar <mingo@kernel.org>        2017-09-14 11:41:05 +0200
commit    941154bd6937a710ae9193a3c733c0029e5ae7b8 (patch)
tree      dede6edfb0306f0ae368d892edbf32f06f24e30b /kernel/watchdog.c
parent    20d853fd0703b1d73c35a22024c0d4fcbcc57c8c (diff)
watchdog/hardlockup/perf: Prevent CPU hotplug deadlock
The following deadlock is possible in the watchdog hotplug code:

  cpus_write_lock()
    ...
      takedown_cpu()
        smpboot_park_threads()
          smpboot_park_thread()
            kthread_park()
              ->park() := watchdog_disable()
                watchdog_nmi_disable()
                  perf_event_release_kernel();
                    put_event()
                      _free_event()
                        ->destroy() := hw_perf_event_destroy()
                          x86_release_hardware()
                            release_ds_buffers()
                              get_online_cpus()

when a per cpu watchdog perf event is destroyed which drops the last
reference to the PMU hardware. The cleanup code there invokes
get_online_cpus() which instantly deadlocks because the hotplug percpu
rwsem is write locked.

To solve this add a deferring mechanism:

  cpus_write_lock()
    kthread_park()
      watchdog_nmi_disable(deferred)
        perf_event_disable(event);
        move_event_to_deferred(event);
    ....
  cpus_write_unlock()
  cleanup_deferred_events()
    perf_event_release_kernel()

This is still properly serialized against concurrent hotplug via the
cpu_add_remove_lock, which is held by the task which initiated the hotplug
event.

This is also used to handle event destruction when the watchdog threads
are parked via other mechanisms than CPU hotplug.

Analyzed-by: Peter Zijlstra <peterz@infradead.org>
Reported-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Don Zickus <dzickus@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Link: http://lkml.kernel.org/r/20170912194146.884469246@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
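The perf side of the deferral lives outside this file (kernel/watchdog_hld.c in the same series) and is not part of the diff below. A minimal sketch of what the two hooks could look like, assuming a per-CPU watchdog_ev event pointer and a dead_events_mask cpumask as illustrative bookkeeping; names and details are not taken from this diff:

/*
 * Sketch of the deferring side; illustrative only, not the exact
 * upstream kernel/watchdog_hld.c code.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
static struct cpumask dead_events_mask;

/* Called from watchdog_nmi_disable() with the hotplug rwsem write locked. */
void hardlockup_detector_perf_disable(void)
{
	struct perf_event *event = this_cpu_read(watchdog_ev);

	if (event) {
		/* Only disable; releasing here would take get_online_cpus(). */
		perf_event_disable(event);
		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
	}
}

/* Called from lockup_detector_cleanup() after cpus_write_unlock(). */
void hardlockup_detector_perf_cleanup(void)
{
	int cpu;

	for_each_cpu(cpu, &dead_events_mask) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_release_kernel(event);
		per_cpu(watchdog_ev, cpu) = NULL;
	}
	cpumask_clear(&dead_events_mask);
}

Disabling the event on the parked CPU needs no hotplug lock, while the final perf_event_release_kernel() is postponed until lockup_detector_cleanup() runs after cpus_write_unlock().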
Diffstat (limited to 'kernel/watchdog.c')
-rw-r--r--  kernel/watchdog.c  25
1 file changed, 25 insertions(+), 0 deletions(-)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index af000956286c..dd1fd59683c5 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -109,8 +109,10 @@ int __weak watchdog_nmi_enable(unsigned int cpu)
 {
 	return 0;
 }
+
 void __weak watchdog_nmi_disable(unsigned int cpu)
 {
+	hardlockup_detector_perf_disable();
 }
 
 /*
@@ -193,6 +195,8 @@ __setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
 #endif
 #endif
 
+static void __lockup_detector_cleanup(void);
+
 /*
  * Hard-lockup warnings should be triggered after just a few seconds. Soft-
  * lockups can have false positives under extreme conditions. So we generally
@@ -631,6 +635,24 @@ static void set_sample_period(void)
 }
 #endif /* SOFTLOCKUP */
 
+static void __lockup_detector_cleanup(void)
+{
+	lockdep_assert_held(&watchdog_mutex);
+	hardlockup_detector_perf_cleanup();
+}
+
+/**
+ * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
+ *
+ * Caller must not hold the cpu hotplug rwsem.
+ */
+void lockup_detector_cleanup(void)
+{
+	mutex_lock(&watchdog_mutex);
+	__lockup_detector_cleanup();
+	mutex_unlock(&watchdog_mutex);
+}
+
 /**
  * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
  *
@@ -665,6 +687,8 @@ static int proc_watchdog_update(void)
 
 	watchdog_nmi_reconfigure();
 
+	__lockup_detector_cleanup();
+
 	return err;
 }
 
@@ -837,6 +861,7 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
 		}
 
 		watchdog_nmi_reconfigure();
+		__lockup_detector_cleanup();
 	}
 
 	mutex_unlock(&watchdog_mutex);
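For context, a sketch of how the deferred cleanup is expected to be driven from the hotplug path, following the ordering described in the commit message. This is an illustrative reconstruction rather than the upstream kernel/cpu.c code; takedown_cpu_sketch() is a hypothetical stand-in for the real teardown callbacks.

/* Illustrative caller side, loosely modelled on the CPU hotplug path. */
#include <linux/cpu.h>
#include <linux/nmi.h>

static int takedown_cpu_sketch(unsigned int cpu);	/* hypothetical helper */

static int example_cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();	/* takes cpu_add_remove_lock */
	cpus_write_lock();

	/*
	 * Parks the watchdog thread; ->park() ends up in
	 * watchdog_nmi_disable(), which now only disables and defers the
	 * per cpu perf event instead of releasing it.
	 */
	err = takedown_cpu_sketch(cpu);

	cpus_write_unlock();

	/*
	 * The hotplug rwsem is released, so the deferred perf events can be
	 * freed without deadlocking on get_online_cpus(). Concurrent hotplug
	 * is still excluded because cpu_add_remove_lock is held until
	 * cpu_maps_update_done().
	 */
	lockup_detector_cleanup();

	cpu_maps_update_done();
	return err;
}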