commit f20730efbd305d42eded761f6fbd9a59d6125228
tree c9c553ff088fd788ac9149ca3f7a5c83cf8eaed6
parent 586b222d748e91c619d68e9239654ebc7fed9b0c
parent 5c3124975e15c1fadd5af1c61e4d627cf6d97ba2
author Linus Torvalds <torvalds@linux-foundation.org> 2023-04-28 15:03:43 -0700
committer Linus Torvalds <torvalds@linux-foundation.org> 2023-04-28 15:03:43 -0700
Merge tag 'smp-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull SMP cross-CPU function-call updates from Ingo Molnar:
- Remove the extended diagnostics code and adjust the config options
  for CSD lock debugging
- Add a generic IPI-sending tracepoint, as currently there's no easy
way to instrument IPI origins: it's arch dependent and for some major
architectures it's not even consistently available.
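The two tracepoints are exported via EXPORT_TRACEPOINT_SYMBOL_GPL (see the
kernel/sched/core.c hunk below), so external modules can attach probes to
them directly. A minimal sketch of such a consumer — the module and its
probe body are illustrative, not part of this merge:

#include <linux/module.h>
#include <trace/events/ipi.h>

/* Probe signature: private data pointer first, then the TP_PROTO args. */
static void probe_ipi_send_cpu(void *data, const unsigned int cpu,
			       unsigned long callsite, void *callback)
{
	/* %pS prints the sending code and the handler symbolically. */
	pr_info("IPI to CPU%u from %pS for %pS\n",
		cpu, (void *)callsite, callback);
}

static int __init ipi_probe_init(void)
{
	return register_trace_ipi_send_cpu(probe_ipi_send_cpu, NULL);
}

static void __exit ipi_probe_exit(void)
{
	unregister_trace_ipi_send_cpu(probe_ipi_send_cpu, NULL);
	tracepoint_synchronize_unregister();
}

module_init(ipi_probe_init);
module_exit(ipi_probe_exit);
MODULE_LICENSE("GPL");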
* tag 'smp-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
trace,smp: Trace all smp_function_call*() invocations
trace: Add trace_ipi_send_cpu()
sched, smp: Trace smp callback causing an IPI
smp: reword smp call IPI comment
treewide: Trace IPIs sent via smp_send_reschedule()
irq_work: Trace self-IPIs sent via arch_irq_work_raise()
smp: Trace IPIs sent via arch_send_call_function_ipi_mask()
sched, smp: Trace IPIs sent via send_call_function_single_ipi()
trace: Add trace_ipi_send_cpumask()
kernel/smp: Make csdlock_debug= resettable
locking/csd_lock: Remove per-CPU data indirection from CSD lock debugging
locking/csd_lock: Remove added data from CSD lock debugging
locking/csd_lock: Add Kconfig option for csd_debug default
33 files changed, 217 insertions(+), 281 deletions(-)
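The core of the treewide change: smp_send_reschedule() becomes a common
wrapper macro in include/linux/smp.h, and every architecture's
implementation is renamed to arch_smp_send_reschedule(). Roughly, a call
site now behaves like the following sketch — an illustrative
hand-expansion of the macro; kick_remote_cpu() is a hypothetical caller,
and in the real macro _RET_IP_ is evaluated at each call site:

#include <linux/smp.h>
#include <trace/events/ipi.h>

static void kick_remote_cpu(int cpu)
{
	/*
	 * callback is NULL because scheduler_ipi() is inline; the
	 * callsite address is what identifies the IPI's origin.
	 */
	trace_ipi_send_cpu(cpu, _RET_IP_, NULL);
	arch_smp_send_reschedule(cpu);	/* the old per-arch smp_send_reschedule() */
}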
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a38d55b6482c..b1453e0e3c95 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -912,15 +912,14 @@
 	cs89x0_media=	[HW,NET]
 			Format: { rj45 | aui | bnc }
 
-	csdlock_debug=	[KNL] Enable debug add-ons of cross-CPU function call
-			handling. When switched on, additional debug data is
-			printed to the console in case a hanging CPU is
-			detected, and that CPU is pinged again in order to try
-			to resolve the hang situation.
-			0: disable csdlock debugging (default)
-			1: enable basic csdlock debugging (minor impact)
-			ext: enable extended csdlock debugging (more impact,
-			     but more data)
+	csdlock_debug=	[KNL] Enable or disable debug add-ons of cross-CPU
+			function call handling. When switched on,
+			additional debug data is printed to the console
+			in case a hanging CPU is detected, and that
+			CPU is pinged again in order to try to resolve
+			the hang situation. The default value of this
+			option depends on the CSD_LOCK_WAIT_DEBUG_DEFAULT
+			Kconfig option.
 
 	dasd=		[HW,NET]
 			See header of drivers/s390/block/dasd_devmap.c.
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 0ede4b044e86..7439b2377df5 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -562,7 +562,7 @@ handle_ipi(struct pt_regs *regs)
 }
 
 void
-smp_send_reschedule(int cpu)
+arch_smp_send_reschedule(int cpu)
 {
 #ifdef DEBUG_IPI_MSG
 	if (cpu == hard_smp_processor_id())
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index ad93fe6e4b77..409cfa4675b4 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -292,7 +292,7 @@ static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
 		ipi_send_msg_one(cpu, msg);
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
 }
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index f4a4ac028b6b..87f8d0e5e314 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -48,7 +48,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mpu.h>
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
 /*
@@ -749,7 +748,7 @@ void __init set_smp_ipi_range(int ipi_base, int n)
 	ipi_setup(smp_processor_id());
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
diff --git a/arch/arm/mach-actions/platsmp.c b/arch/arm/mach-actions/platsmp.c
index f26618b43514..7b208e96fbb6 100644
--- a/arch/arm/mach-actions/platsmp.c
+++ b/arch/arm/mach-actions/platsmp.c
@@ -20,6 +20,8 @@
 #include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
 
+#include <trace/events/ipi.h>
+
 #define OWL_CPU1_ADDR	0x50
 #define OWL_CPU1_FLAG	0x5c
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 05fe797e4203..d00d4cbb31b1 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -51,7 +51,6 @@
 #include <asm/ptrace.h>
 #include <asm/virt.h>
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
 DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
@@ -979,7 +978,7 @@ void __init set_smp_ipi_range(int ipi_base, int n)
 	ipi_setup(smp_processor_id());
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index 9c7a20b73ac6..b12e2c3c387f 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -140,7 +140,7 @@ void smp_send_stop(void)
 	on_each_cpu(ipi_stop, NULL, 1);
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 4ba93e59370c..4e8bee25b8c6 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -217,7 +217,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	}
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	send_ipi(cpumask_of(cpu), IPI_RESCHEDULE);
 }
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index e2cc59db86bc..ea4f009a232b 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -220,11 +220,11 @@ kdump_smp_send_init(void)
  * Called with preemption disabled.
  */
 void
-smp_send_reschedule (int cpu)
+arch_smp_send_reschedule (int cpu)
 {
 	ia64_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
-EXPORT_SYMBOL_GPL(smp_send_reschedule);
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
 
 /*
  * Called with preemption disabled.
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 51f328169a7b..ed167e244cda 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -155,11 +155,11 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
 }
-EXPORT_SYMBOL_GPL(smp_send_reschedule);
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
 
 irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
 {
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 4eee29b7845c..aab8981bc32c 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -66,7 +66,7 @@ extern void calculate_cpu_foreign_map(void);
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-static inline void smp_send_reschedule(int cpu)
+static inline void arch_smp_send_reschedule(int cpu)
 {
 	extern const struct plat_smp_ops *mp_ops;	/* private */
 
diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
index e1419095a6f0..0a7a059e2dff 100644
--- a/arch/openrisc/kernel/smp.c
+++ b/arch/openrisc/kernel/smp.c
@@ -173,7 +173,7 @@ void handle_IPI(unsigned int ipi_msg)
 	}
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 7dbd92cafae3..b7fc859fa87d 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -246,8 +246,8 @@ void kgdb_roundup_cpus(void)
 inline void
 smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }
 
-void
-smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
+void
+arch_smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
 
 void
 smp_send_all_nop(void)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index bf9744034c3a..265801a3e94c 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -61,6 +61,8 @@
 #include <asm/kup.h>
 #include <asm/fadump.h>
 
+#include <trace/events/ipi.h>
+
 #ifdef DEBUG
 #include <asm/udbg.h>
 #define DBG(fmt...) udbg_printf(fmt)
@@ -364,12 +366,12 @@ static inline void do_message_pass(int cpu, int msg)
 #endif
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	if (likely(smp_ops))
 		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
-EXPORT_SYMBOL_GPL(smp_send_reschedule);
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
 
 void arch_send_call_function_single_ipi(int cpu)
 {
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 6ba68dd6190b..3b70b5f80bd5 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -43,6 +43,7 @@
 #include <linux/compiler.h>
 #include <linux/of.h>
 #include <linux/irqdomain.h>
+#include <linux/smp.h>
 
 #include <asm/ftrace.h>
 #include <asm/reg.h>
@@ -80,6 +81,8 @@
 #include <asm/dtl.h>
 #include <asm/plpar_wrappers.h>
 
+#include <trace/events/ipi.h>
+
 #include "book3s.h"
 #include "book3s_hv.h"
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 428532a69762..191424468f10 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -20,6 +20,8 @@
 #include <asm/opal.h>
 #include <asm/smp.h>
 
+#include <trace/events/ipi.h>
+
 #include "subcore.h"
 #include "powernv.h"
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 5f985a197eff..23e533766a49 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -333,8 +333,8 @@ bool smp_crash_stop_failed(void)
 }
 #endif
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	send_ipi_single(cpu, IPI_RESCHEDULE);
 }
-EXPORT_SYMBOL_GPL(smp_send_reschedule);
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 4df797ab8ca2..70a84748f806 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -553,7 +553,7 @@ void arch_send_call_function_single_ipi(int cpu)
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
 }
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 65924d9ec245..5cf35a774dc7 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -256,7 +256,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 		(bogosum / (5000/HZ)) % 100);
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
 }
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index ad8094d955eb..87eaa7719fa2 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -120,7 +120,7 @@ void cpu_panic(void)
 
 struct linux_prom_registers smp_penguin_ctable = { 0 };
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	/*
 	 * CPU model dependent way of implementing IPI generation targeting
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index a55295d1b924..e5964d1d8b37 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1430,7 +1430,7 @@ static unsigned long send_cpu_poke(int cpu)
 	return hv_err;
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	if (cpu == smp_processor_id()) {
 		WARN_ON_ONCE(preemptible());
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 9fa795102b93..4e91054c84be 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -99,7 +99,7 @@ static inline void __noreturn play_dead(void)
 	BUG();
 }
 
-static inline void smp_send_reschedule(int cpu)
+static inline void arch_smp_send_reschedule(int cpu)
 {
 	smp_ops.smp_send_reschedule(cpu);
 }
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f25bc3cbb250..a1b08359769b 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -27,6 +27,7 @@
 #include <linux/swap.h>
 #include <linux/rwsem.h>
 #include <linux/cc_platform.h>
+#include <linux/smp.h>
 
 #include <asm/apic.h>
 #include <asm/perf_event.h>
@@ -41,6 +42,9 @@
 #include <asm/fpu/api.h>
 
 #include <asm/virtext.h>
+
+#include <trace/events/ipi.h>
+
 #include "trace.h"
 
 #include "svm.h"
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3d852ce84920..2b1d82647195 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -60,7 +60,9 @@
 #include <linux/mem_encrypt.h>
 #include <linux/entry-kvm.h>
 #include <linux/suspend.h>
+#include <linux/smp.h>
 
+#include <trace/events/ipi.h>
 #include <trace/events/kvm.h>
 
 #include <asm/debugreg.h>
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 054bd64eab19..07dd6baf18cf 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -391,7 +391,7 @@ void arch_send_call_function_single_ipi(int cpu)
 	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 7b93504eed26..91ea4a67f8ca 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -125,8 +125,15 @@ extern void smp_send_stop(void);
 /*
  * sends a 'reschedule' event to another CPU:
  */
-extern void smp_send_reschedule(int cpu);
-
+extern void arch_smp_send_reschedule(int cpu);
+/*
+ * scheduler_ipi() is inline so can't be passed as callback reason, but the
+ * callsite IP should be sufficient for root-causing IPIs sent from here.
+ */
+#define smp_send_reschedule(cpu) ({		  \
+	trace_ipi_send_cpu(cpu, _RET_IP_, NULL);  \
+	arch_smp_send_reschedule(cpu);		  \
+})
 
 /*
  * Prepare machine for booting other CPUs.
diff --git a/include/trace/events/ipi.h b/include/trace/events/ipi.h
index 0be71dad6ec0..3de9bfc982ce 100644
--- a/include/trace/events/ipi.h
+++ b/include/trace/events/ipi.h
@@ -35,6 +35,50 @@ TRACE_EVENT(ipi_raise,
 	TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
 );
 
+TRACE_EVENT(ipi_send_cpu,
+
+	TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback),
+
+	TP_ARGS(cpu, callsite, callback),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(void *, callsite)
+		__field(void *, callback)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->callsite = (void *)callsite;
+		__entry->callback = callback;
+	),
+
+	TP_printk("cpu=%u callsite=%pS callback=%pS",
+		  __entry->cpu, __entry->callsite, __entry->callback)
+);
+
+TRACE_EVENT(ipi_send_cpumask,
+
+	TP_PROTO(const struct cpumask *cpumask, unsigned long callsite, void *callback),
+
+	TP_ARGS(cpumask, callsite, callback),
+
+	TP_STRUCT__entry(
+		__cpumask(cpumask)
+		__field(void *, callsite)
+		__field(void *, callback)
+	),
+
+	TP_fast_assign(
+		__assign_cpumask(cpumask, cpumask_bits(cpumask));
+		__entry->callsite = (void *)callsite;
+		__entry->callback = callback;
+	),
+
+	TP_printk("cpumask=%s callsite=%pS callback=%pS",
+		  __get_cpumask(cpumask), __entry->callsite, __entry->callback)
+);
+
 DECLARE_EVENT_CLASS(ipi_handler,
 
 	TP_PROTO(const char *reason),
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 7afa40fe5cc4..2f4fb336dda1 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -22,6 +22,8 @@
 #include <asm/processor.h>
 #include <linux/kasan.h>
 
+#include <trace/events/ipi.h>
+
 static DEFINE_PER_CPU(struct llist_head, raised_list);
 static DEFINE_PER_CPU(struct llist_head, lazy_list);
 static DEFINE_PER_CPU(struct task_struct *, irq_workd);
@@ -74,6 +76,14 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
+static __always_inline void irq_work_raise(struct irq_work *work)
+{
+	if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
+		trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);
+
+	arch_irq_work_raise();
+}
+
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
 static void __irq_work_queue_local(struct irq_work *work)
 {
@@ -99,7 +109,7 @@ static void __irq_work_queue_local(struct irq_work *work)
 
 	/* If the work is "lazy", handle it from next tick if any */
 	if (!lazy_work || tick_nohz_tick_stopped())
-		arch_irq_work_raise();
+		irq_work_raise(work);
 }
 
 /* Enqueue the irq work @work on the current CPU */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 54c75af24899..944c3ae39861 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -80,6 +80,7 @@
 #define CREATE_TRACE_POINTS
 #include <linux/sched/rseq_api.h>
 #include <trace/events/sched.h>
+#include <trace/events/ipi.h>
 #undef CREATE_TRACE_POINTS
 
 #include "sched.h"
@@ -95,6 +96,9 @@
 #include "../../io_uring/io-wq.h"
 #include "../smpboot.h"
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
+
 /*
  * Export tracepoints that act as a bare tracehook (ie: have no trace event
  * associated with them) to allow external modules to probe them.
@@ -3848,14 +3852,20 @@ void sched_ttwu_pending(void *arg)
 	rq_unlock_irqrestore(rq, &rf);
 }
 
-void send_call_function_single_ipi(int cpu)
+/*
+ * Prepare the scene for sending an IPI for a remote smp_call
+ *
+ * Returns true if the caller can proceed with sending the IPI.
+ * Returns false otherwise.
+ */
+bool call_function_single_prep_ipi(int cpu)
 {
-	struct rq *rq = cpu_rq(cpu);
-
-	if (!set_nr_if_polling(rq->idle))
-		arch_send_call_function_single_ipi(cpu);
-	else
+	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
 		trace_sched_wake_idle_without_ipi(cpu);
+		return false;
+	}
+
+	return true;
 }
 
 /*
diff --git a/kernel/sched/smp.h b/kernel/sched/smp.h
index 2eb23dd0f285..21ac44428bb0 100644
--- a/kernel/sched/smp.h
+++ b/kernel/sched/smp.h
@@ -6,7 +6,7 @@
 
 extern void sched_ttwu_pending(void *arg);
 
-extern void send_call_function_single_ipi(int cpu);
+extern bool call_function_single_prep_ipi(int cpu);
 
 #ifdef CONFIG_SMP
 extern void flush_smp_call_function_queue(void);
diff --git a/kernel/smp.c b/kernel/smp.c
index 06a413987a14..ab3e5dad6cfe 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -26,68 +26,15 @@
 #include <linux/sched/debug.h>
 #include <linux/jump_label.h>
 
+#include <trace/events/ipi.h>
+
 #include "smpboot.h"
 #include "sched/smp.h"
 
 #define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
 
-#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
-union cfd_seq_cnt {
-	u64		val;
-	struct {
-		u64	src:16;
-		u64	dst:16;
-#define CFD_SEQ_NOCPU	0xffff
-		u64	type:4;
-#define CFD_SEQ_QUEUE	0
-#define CFD_SEQ_IPI	1
-#define CFD_SEQ_NOIPI	2
-#define CFD_SEQ_PING	3
-#define CFD_SEQ_PINGED	4
-#define CFD_SEQ_HANDLE	5
-#define CFD_SEQ_DEQUEUE	6
-#define CFD_SEQ_IDLE	7
-#define CFD_SEQ_GOTIPI	8
-#define CFD_SEQ_HDLEND	9
-		u64	cnt:28;
-	} u;
-};
-
-static char *seq_type[] = {
-	[CFD_SEQ_QUEUE]		= "queue",
-	[CFD_SEQ_IPI]		= "ipi",
-	[CFD_SEQ_NOIPI]		= "noipi",
-	[CFD_SEQ_PING]		= "ping",
-	[CFD_SEQ_PINGED]	= "pinged",
-	[CFD_SEQ_HANDLE]	= "handle",
-	[CFD_SEQ_DEQUEUE]	= "dequeue (src CPU 0 == empty)",
-	[CFD_SEQ_IDLE]		= "idle",
-	[CFD_SEQ_GOTIPI]	= "gotipi",
-	[CFD_SEQ_HDLEND]	= "hdlend (src CPU 0 == early)",
-};
-
-struct cfd_seq_local {
-	u64	ping;
-	u64	pinged;
-	u64	handle;
-	u64	dequeue;
-	u64	idle;
-	u64	gotipi;
-	u64	hdlend;
-};
-#endif
-
-struct cfd_percpu {
-	call_single_data_t	csd;
-#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
-	u64	seq_queue;
-	u64	seq_ipi;
-	u64	seq_noipi;
-#endif
-};
-
 struct call_function_data {
-	struct cfd_percpu	__percpu *pcpu;
+	call_single_data_t	__percpu *csd;
 	cpumask_var_t		cpumask;
 	cpumask_var_t		cpumask_ipi;
 };
@@ -110,8 +57,8 @@ int smpcfd_prepare_cpu(unsigned int cpu)
 		free_cpumask_var(cfd->cpumask);
 		return -ENOMEM;
 	}
-	cfd->pcpu = alloc_percpu(struct cfd_percpu);
-	if (!cfd->pcpu) {
+	cfd->csd = alloc_percpu(call_single_data_t);
+	if (!cfd->csd) {
 		free_cpumask_var(cfd->cpumask);
 		free_cpumask_var(cfd->cpumask_ipi);
 		return -ENOMEM;
@@ -126,7 +73,7 @@ int smpcfd_dead_cpu(unsigned int cpu)
 
 	free_cpumask_var(cfd->cpumask);
 	free_cpumask_var(cfd->cpumask_ipi);
-	free_percpu(cfd->pcpu);
+	free_percpu(cfd->csd);
 	return 0;
 }
 
@@ -156,23 +103,49 @@ void __init call_function_init(void)
 	smpcfd_prepare_cpu(smp_processor_id());
 }
 
+static __always_inline void
+send_call_function_single_ipi(int cpu)
+{
+	if (call_function_single_prep_ipi(cpu)) {
+		trace_ipi_send_cpu(cpu, _RET_IP_,
+				   generic_smp_call_function_single_interrupt);
+		arch_send_call_function_single_ipi(cpu);
+	}
+}
+
+static __always_inline void
+send_call_function_ipi_mask(struct cpumask *mask)
+{
+	trace_ipi_send_cpumask(mask, _RET_IP_,
+			       generic_smp_call_function_single_interrupt);
+	arch_send_call_function_ipi_mask(mask);
+}
+
 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
 
-static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);
-static DEFINE_STATIC_KEY_FALSE(csdlock_debug_extended);
+static DEFINE_STATIC_KEY_MAYBE(CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT, csdlock_debug_enabled);
 
+/*
+ * Parse the csdlock_debug= kernel boot parameter.
+ *
+ * If you need to restore the old "ext" value that once provided
+ * additional debugging information, reapply the following commits:
+ *
+ * de7b09ef658d ("locking/csd_lock: Prepare more CSD lock debugging")
+ * a5aabace5fb8 ("locking/csd_lock: Add more data to CSD lock debugging")
+ */
 static int __init csdlock_debug(char *str)
 {
+	int ret;
 	unsigned int val = 0;
 
-	if (str && !strcmp(str, "ext")) {
-		val = 1;
-		static_branch_enable(&csdlock_debug_extended);
-	} else
-		get_option(&str, &val);
-
-	if (val)
-		static_branch_enable(&csdlock_debug_enabled);
+	ret = get_option(&str, &val);
+	if (ret) {
+		if (val)
+			static_branch_enable(&csdlock_debug_enabled);
+		else
+			static_branch_disable(&csdlock_debug_enabled);
+	}
 
 	return 1;
 }
@@ -181,36 +154,11 @@ __setup("csdlock_debug=", csdlock_debug);
 static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
 static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
 static DEFINE_PER_CPU(void *, cur_csd_info);
-static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);
 
 static ulong csd_lock_timeout = 5000;  /* CSD lock timeout in milliseconds. */
 module_param(csd_lock_timeout, ulong, 0444);
 
 static atomic_t csd_bug_count = ATOMIC_INIT(0);
-static u64 cfd_seq;
-
-#define CFD_SEQ(s, d, t, c)	\
-	(union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }
-
-static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type)
-{
-	union cfd_seq_cnt new, old;
-
-	new = CFD_SEQ(src, dst, type, 0);
-
-	do {
-		old.val = READ_ONCE(cfd_seq);
-		new.u.cnt = old.u.cnt + 1;
-	} while (cmpxchg(&cfd_seq, old.val, new.val) != old.val);
-
-	return old.val;
-}
-
-#define cfd_seq_store(var, src, dst, type)				\
-	do {								\
-		if (static_branch_unlikely(&csdlock_debug_extended))	\
-			var = cfd_seq_inc(src, dst, type);		\
-	} while (0)
 
 /* Record current CSD work for current CPU, NULL to erase. */
 static void __csd_lock_record(struct __call_single_data *csd)
@@ -244,80 +192,6 @@ static int csd_lock_wait_getcpu(struct __call_single_data *csd)
 	return -1;
 }
 
-static void cfd_seq_data_add(u64 val, unsigned int src, unsigned int dst,
-			     unsigned int type, union cfd_seq_cnt *data,
-			     unsigned int *n_data, unsigned int now)
-{
-	union cfd_seq_cnt new[2];
-	unsigned int i, j, k;
-
-	new[0].val = val;
-	new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1);
-
-	for (i = 0; i < 2; i++) {
-		if (new[i].u.cnt <= now)
-			new[i].u.cnt |= 0x80000000U;
-		for (j = 0; j < *n_data; j++) {
-			if (new[i].u.cnt == data[j].u.cnt) {
-				/* Direct read value trumps generated one. */
-				if (i == 0)
-					data[j].val = new[i].val;
-				break;
-			}
-			if (new[i].u.cnt < data[j].u.cnt) {
-				for (k = *n_data; k > j; k--)
-					data[k].val = data[k - 1].val;
-				data[j].val = new[i].val;
-				(*n_data)++;
-				break;
-			}
-		}
-		if (j == *n_data) {
-			data[j].val = new[i].val;
-			(*n_data)++;
-		}
-	}
-}
-
-static const char *csd_lock_get_type(unsigned int type)
-{
-	return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type];
-}
-
-static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)
-{
-	struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);
-	unsigned int srccpu = csd->node.src;
-	struct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);
-	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
-	unsigned int now;
-	union cfd_seq_cnt data[2 * ARRAY_SIZE(seq_type)];
-	unsigned int n_data = 0, i;
-
-	data[0].val = READ_ONCE(cfd_seq);
-	now = data[0].u.cnt;
-
-	cfd_seq_data_add(pcpu->seq_queue, srccpu, cpu, CFD_SEQ_QUEUE, data, &n_data, now);
-	cfd_seq_data_add(pcpu->seq_ipi, srccpu, cpu, CFD_SEQ_IPI, data, &n_data, now);
-	cfd_seq_data_add(pcpu->seq_noipi, srccpu, cpu, CFD_SEQ_NOIPI, data, &n_data, now);
-
-	cfd_seq_data_add(per_cpu(cfd_seq_local.ping, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PING, data, &n_data, now);
-	cfd_seq_data_add(per_cpu(cfd_seq_local.pinged, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED, data, &n_data, now);
-
-	cfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);
-	cfd_seq_data_add(seq->gotipi, CFD_SEQ_NOCPU, cpu, CFD_SEQ_GOTIPI, data, &n_data, now);
-	cfd_seq_data_add(seq->handle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HANDLE, data, &n_data, now);
-	cfd_seq_data_add(seq->dequeue, CFD_SEQ_NOCPU, cpu, CFD_SEQ_DEQUEUE, data, &n_data, now);
-	cfd_seq_data_add(seq->hdlend, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HDLEND, data, &n_data, now);
-
-	for (i = 0; i < n_data; i++) {
-		pr_alert("\tcsd: cnt(%07x): %04x->%04x %s\n",
-			 data[i].u.cnt & ~0x80000000U, data[i].u.src,
-			 data[i].u.dst, csd_lock_get_type(data[i].u.type));
-	}
-	pr_alert("\tcsd: cnt now: %07x\n", now);
-}
-
 /*
  * Complain if too much time spent waiting.  Note that only
  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
@@ -368,8 +242,6 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
 			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
 	}
 	if (cpu >= 0) {
-		if (static_branch_unlikely(&csdlock_debug_extended))
-			csd_lock_print_extended(csd, cpu);
 		dump_cpu_task(cpu);
 		if (!cpu_cur_csd) {
 			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
@@ -412,27 +284,7 @@ static __always_inline void csd_lock_wait(struct __call_single_data *csd)
 
 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
-
-static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
-{
-	unsigned int this_cpu = smp_processor_id();
-	struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);
-	struct call_function_data *cfd = this_cpu_ptr(&cfd_data);
-	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
-
-	cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
-	if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
-		cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
-		cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
-		send_call_function_single_ipi(cpu);
-		cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
-	} else {
-		cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
-	}
-}
 #else
-#define cfd_seq_store(var, src, dst, type)
-
 static void csd_lock_record(struct __call_single_data *csd)
 {
 }
@@ -470,23 +322,29 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
 
 void __smp_call_single_queue(int cpu, struct llist_node *node)
 {
-#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
-	if (static_branch_unlikely(&csdlock_debug_extended)) {
-		unsigned int type;
-
-		type = CSD_TYPE(container_of(node, call_single_data_t,
-					     node.llist));
-		if (type == CSD_TYPE_SYNC || type == CSD_TYPE_ASYNC) {
-			__smp_call_single_queue_debug(cpu, node);
-			return;
-		}
+	/*
+	 * We have to check the type of the CSD before queueing it, because
+	 * once queued it can have its flags cleared by
+	 *   flush_smp_call_function_queue()
+	 * even if we haven't sent the smp_call IPI yet (e.g. the stopper
+	 * executes migration_cpu_stop() on the remote CPU).
+	 */
+	if (trace_ipi_send_cpu_enabled()) {
+		call_single_data_t *csd;
+		smp_call_func_t func;
+
+		csd = container_of(node, call_single_data_t, node.llist);
+		func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
+			sched_ttwu_pending : csd->func;
+
+		trace_ipi_send_cpu(cpu, _RET_IP_, func);
 	}
-#endif
 
 	/*
-	 * The list addition should be visible before sending the IPI
-	 * handler locks the list to pull the entry off it because of
-	 * normal cache coherency rules implied by spinlocks.
+	 * The list addition should be visible to the target CPU when it pops
+	 * the head of the list to pull the entry off it in the IPI handler
+	 * because of normal cache coherency rules implied by the underlying
+	 * llist ops.
 	 *
 	 * If IPIs can go out of order to the cache coherency protocol
 	 * in an architecture, sufficient synchronisation should be added
@@ -541,8 +399,6 @@ static int generic_exec_single(int cpu, struct __call_single_data *csd)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
-	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
-		      smp_processor_id(), CFD_SEQ_GOTIPI);
 	__flush_smp_call_function_queue(true);
 }
 
@@ -570,13 +426,7 @@ static void __flush_smp_call_function_queue(bool warn_cpu_offline)
 	lockdep_assert_irqs_disabled();
 
 	head = this_cpu_ptr(&call_single_queue);
-	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->handle, CFD_SEQ_NOCPU,
-		      smp_processor_id(), CFD_SEQ_HANDLE);
 	entry = llist_del_all(head);
-	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->dequeue,
-		      /* Special meaning of source cpu: 0 == queue empty */
-		      entry ? CFD_SEQ_NOCPU : 0,
-		      smp_processor_id(), CFD_SEQ_DEQUEUE);
 	entry = llist_reverse_order(entry);
 
 	/* There shouldn't be any pending callbacks on an offline CPU. */
@@ -635,12 +485,8 @@ static void __flush_smp_call_function_queue(bool warn_cpu_offline)
 		}
 	}
 
-	if (!entry) {
-		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend,
-			      0, smp_processor_id(),
-			      CFD_SEQ_HDLEND);
+	if (!entry)
 		return;
-	}
 
 	/*
 	 * Second; run all !SYNC callbacks.
@@ -678,9 +524,6 @@ static void __flush_smp_call_function_queue(bool warn_cpu_offline)
 	 */
 	if (entry)
 		sched_ttwu_pending(entry);
-
-	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend, CFD_SEQ_NOCPU,
-		      smp_processor_id(), CFD_SEQ_HDLEND);
 }
 
 
@@ -704,8 +547,6 @@ void flush_smp_call_function_queue(void)
 	if (llist_empty(this_cpu_ptr(&call_single_queue)))
 		return;
 
-	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
-		      smp_processor_id(), CFD_SEQ_IDLE);
 	local_irq_save(flags);
 	/* Get the already pending soft interrupts for RT enabled kernels */
 	was_pending = local_softirq_pending();
@@ -887,9 +728,9 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 	int cpu, last_cpu, this_cpu = smp_processor_id();
 	struct call_function_data *cfd;
 	bool wait = scf_flags & SCF_WAIT;
+	int nr_cpus = 0, nr_queued = 0;
 	bool run_remote = false;
 	bool run_local = false;
-	int nr_cpus = 0;
 
 	lockdep_assert_preemption_disabled();
 
@@ -929,11 +770,12 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 
 		cpumask_clear(cfd->cpumask_ipi);
 		for_each_cpu(cpu, cfd->cpumask) {
-			struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
-			call_single_data_t *csd = &pcpu->csd;
+			call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
 
-			if (cond_func && !cond_func(cpu, info))
+			if (cond_func && !cond_func(cpu, info)) {
+				__cpumask_clear_cpu(cpu, cfd->cpumask);
 				continue;
+			}
 
 			csd_lock(csd);
 			if (wait)
@@ -944,19 +786,20 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 			csd->node.src = smp_processor_id();
 			csd->node.dst = cpu;
 #endif
-			cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
 			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
 				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 				nr_cpus++;
 				last_cpu = cpu;
-
-				cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
-			} else {
-				cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
 			}
+			nr_queued++;
 		}
 
-		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);
+		/*
+		 * Trace each smp_function_call_*() as an IPI, actual IPIs
+		 * will be traced with func==generic_smp_call_function_single_ipi().
+		 */
+		if (nr_queued)
+			trace_ipi_send_cpumask(cfd->cpumask, _RET_IP_, func);
 
 		/*
 		 * Choose the most efficient way to send an IPI. Note that the
@@ -966,9 +809,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 		if (nr_cpus == 1)
 			send_call_function_single_ipi(last_cpu);
 		else if (likely(nr_cpus > 1))
-			arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
-
-		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
+			send_call_function_ipi_mask(cfd->cpumask_ipi);
 	}
 
 	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
@@ -983,7 +824,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 		for_each_cpu(cpu, cfd->cpumask) {
 			call_single_data_t *csd;
 
-			csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
+			csd = per_cpu_ptr(cfd->csd, cpu);
 			csd_lock_wait(csd);
 		}
 	}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index be272aa2fc0a..d49d7c5e12ec 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1490,6 +1490,15 @@ config CSD_LOCK_WAIT_DEBUG
 	  include the IPI handler function currently executing (if any)
 	  and relevant stack traces.
 
+config CSD_LOCK_WAIT_DEBUG_DEFAULT
+	bool "Default csd_lock_wait() debugging on at boot time"
+	depends on CSD_LOCK_WAIT_DEBUG
+	depends on 64BIT
+	default n
+	help
+	  This option causes the csdlock_debug= kernel boot parameter to
+	  default to 1 (basic debugging) instead of 0 (no debugging).
+
 endmenu # lock debugging
 
 config TRACE_IRQFLAGS
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b1679d08a216..537f33ac49de 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -62,11 +62,14 @@
 #include "kvm_mm.h"
 #include "vfio.h"
 
+#include <trace/events/ipi.h>
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/kvm.h>
 
 #include <linux/kvm_dirty_ring.h>
 
+
 /* Worst case buffer size needed for holding an integer. */
 #define ITOA_MAX_LEN 12
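A note on overhead: the hot paths touched here (irq_work_raise() in
kernel/irq_work.c, __smp_call_single_queue() in kernel/smp.c) gate their
trace-argument set-up behind trace_ipi_send_cpu_enabled(), the static-key
test generated for every tracepoint, so the cost while tracing is off is a
patched-out branch. A sketch of the same pattern in a hypothetical IPI
path — my_arch_raise_ipi() is made up for illustration:

#include <linux/smp.h>
#include <trace/events/ipi.h>

static void my_arch_raise_ipi(int cpu)
{
	/* a hypothetical hardware doorbell write would go here */
}

static void my_send_ipi(int cpu)
{
	/*
	 * Static-branch gated: while the tracepoint is disabled, the
	 * test is a patched-out jump and the argument set-up is skipped.
	 */
	if (trace_ipi_send_cpu_enabled())
		trace_ipi_send_cpu(cpu, _RET_IP_, NULL);

	my_arch_raise_ipi(cpu);
}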