author    Thomas Gleixner <tglx@linutronix.de>  2023-10-17 21:40:46 +0200
committer Thomas Gleixner <tglx@linutronix.de>  2023-10-17 21:40:46 +0200
commit    a940daa52167e9db8ecce82213813b735a9d9f23
tree      d5346c51351ccd1da7532cbec3127e6b73ec59c9 /kernel/smp.c
parent    32e4fa37fa667fdf53499b9de92737dc75199d8e
parent    58720809f52779dc0f08e53e54b014209d13eebb
Merge branch 'linus' into smp/core
Pull in upstream to get the fixes so that dependent changes can be applied.
Diffstat (limited to 'kernel/smp.c')
 kernel/smp.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 822fabb7e3e1..8c714583786b 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -46,6 +46,8 @@ static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
+static DEFINE_PER_CPU(atomic_t, trigger_backtrace) = ATOMIC_INIT(1);
+
 static void __flush_smp_call_function_queue(bool warn_cpu_offline);
 
 int smpcfd_prepare_cpu(unsigned int cpu)
@@ -253,13 +255,15 @@ static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
 			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
 	}
 	if (cpu >= 0) {
-		dump_cpu_task(cpu);
+		if (atomic_cmpxchg_acquire(&per_cpu(trigger_backtrace, cpu), 1, 0))
+			dump_cpu_task(cpu);
 		if (!cpu_cur_csd) {
 			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
 			arch_send_call_function_single_ipi(cpu);
 		}
 	}
-	dump_stack();
+	if (firsttime)
+		dump_stack();
 
 	*ts1 = ts2;
 	return false;
@@ -433,9 +437,14 @@ static void __flush_smp_call_function_queue(bool warn_cpu_offline)
 	struct llist_node *entry, *prev;
 	struct llist_head *head;
 	static bool warned;
+	atomic_t *tbt;
 
 	lockdep_assert_irqs_disabled();
 
+	/* Allow waiters to send backtrace NMI from here onwards */
+	tbt = this_cpu_ptr(&trigger_backtrace);
+	atomic_set_release(tbt, 1);
+
 	head = this_cpu_ptr(&call_single_queue);
 	entry = llist_del_all(head);
 	entry = llist_reverse_order(entry);
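
The kernel/smp.c part of this merge brings in a backtrace-throttling pattern: a per-CPU flag armed to 1 is consumed with an acquire cmpxchg by whichever CSD waiter first decides the destination CPU is stuck, and is re-armed with a release store once that CPU actually starts flushing its call_single_queue, so at most one stack dump is requested per flush. Below is a minimal, single-flag userspace sketch of that pattern using C11 atomics; it keeps the name trigger_backtrace from the diff, but the single global flag (instead of per-CPU state), the printf stand-ins for the NMI backtrace, and the waiter_times_out()/destination_flushes_queue() helpers are illustrative assumptions, not kernel API.

/*
 * Minimal userspace sketch of the throttling pattern above, using C11
 * atomics in place of the kernel's atomic_t API. Illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int trigger_backtrace = 1;	/* armed: one dump allowed */

/* Waiter side: mirrors the cmpxchg added to csd_lock_wait_toolong(). */
static void waiter_times_out(int cpu)
{
	int expected = 1;

	/*
	 * Acquire cmpxchg: only the first waiter to flip 1 -> 0 requests a
	 * dump of the target CPU; later waiters see 0 and stay quiet until
	 * the target re-arms the flag.
	 */
	if (atomic_compare_exchange_strong_explicit(&trigger_backtrace,
						    &expected, 0,
						    memory_order_acquire,
						    memory_order_relaxed))
		printf("dumping stack of CPU %d\n", cpu);
	else
		printf("backtrace of CPU %d already requested, skipping\n", cpu);
}

/* Destination side: mirrors __flush_smp_call_function_queue(). */
static void destination_flushes_queue(void)
{
	/* Release store: re-arm only once the queue is actually flushed. */
	atomic_store_explicit(&trigger_backtrace, 1, memory_order_release);
	/* ... process the queued calls ... */
}

int main(void)
{
	waiter_times_out(3);		/* first timeout: dumps */
	waiter_times_out(3);		/* second timeout: suppressed */
	destination_flushes_queue();
	waiter_times_out(3);		/* dumps again after re-arm */
	return 0;
}

The release/acquire pairing is the point of the design: a waiter that wins the cmpxchg after a re-arm is guaranteed to observe the progress the destination made before re-arming, so repeated timeouts against a genuinely stuck CPU trigger one backtrace per flush rather than a storm of them.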