author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2019-05-17 12:50:42 +0200 |
---|---|---|
committer | Heiko Carstens <heiko.carstens@de.ibm.com> | 2019-06-15 12:25:52 +0200 |
commit | 38f2c691a4b3e89d476f8e8350d1ca299974b89d (patch) | |
tree | 8b4ab76fd8281be6dfcf8d5edc48d5ee522a4710 /arch/s390 | |
parent | 7928260539f3a13b5b23a3fa0a7c0e4f5255940b (diff) | |
s390: improve wait logic of stop_machine
The stop_machine loop that advances the state machine and waits for
all affected CPUs to check in calls cpu_relax_yield in a tight loop
until the last missing CPU has acknowledged the state transition.
On a virtual system where not all logical CPUs are backed by real
CPUs all the time, it can take a while for all CPUs to check in.
With the current definition of cpu_relax_yield, a diagnose 0x44 is
issued, which tells the hypervisor to schedule *some* other CPU. That
can be any CPU, not necessarily one of the CPUs that need to run in
order to advance the state machine. This can lead to a pretty bad
diagnose 0x44 storm until the last missing CPU finally checks in.
Replace the undirected, diagnose 0x44 based cpu_relax_yield with a
directed yield: each CPU in the wait loop picks the next CPU in the
cpumask of stop_machine, and diagnose 0x9c is used to tell the
hypervisor to run that CPU instead of the current one. If only a
limited number of real CPUs backs the virtual CPUs, the real CPUs
end up being passed around in a round-robin fashion.
[heiko.carstens@de.ibm.com]:
Use cpumask_next_wrap as suggested by Peter Zijlstra.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
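
The new cpu_relax_yield() signature is consumed by the generic stop_machine wait loop, which lives outside arch/s390 and is therefore not part of this diff. The snippet below is a minimal, purely illustrative sketch of how such a wait loop would hand the stop_machine cpumask to the directed yield; the names wait_for_check_in, stop_cpus, and num_missing are hypothetical and do not appear in the kernel sources.

```c
/*
 * Illustrative sketch only -- the real caller is the generic stop_machine
 * code, which is changed in a separate commit.  The names below are made up.
 */
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <asm/processor.h>	/* cpu_relax_yield() on s390 */

static void wait_for_check_in(const struct cpumask *stop_cpus,
			      atomic_t *num_missing)
{
	/* Spin until every CPU in stop_cpus has acknowledged the state. */
	while (atomic_read(num_missing) > 0) {
		/*
		 * Old: cpu_relax_yield(void) issued diagnose 0x44 and yielded
		 * to *some* CPU.  New: the stop_machine cpumask is passed in,
		 * so the hypervisor can be asked (diagnose 0x9c) to run the
		 * next CPU of that mask, i.e. one that may still be missing.
		 */
		cpu_relax_yield(stop_cpus);
	}
}
```

The generic caller has to pass its cpumask down in essentially this way, which is why the prototype in the diff below gains a const struct cpumask * argument.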
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/include/asm/processor.h | 3
-rw-r--r-- | arch/s390/kernel/processor.c | 17
-rw-r--r-- | arch/s390/kernel/smp.c | 2
3 files changed, 15 insertions(+), 7 deletions(-)
```diff
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index b0fcbc37b637..445ce9ee4404 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -36,6 +36,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/cpumask.h>
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
 #include <asm/cpu.h>
@@ -225,7 +226,7 @@ static __no_kasan_or_inline unsigned short stap(void)
 /*
  * Give up the time slice of the virtual PU.
  */
 #define cpu_relax_yield cpu_relax_yield
-void cpu_relax_yield(void);
+void cpu_relax_yield(const struct cpumask *cpumask);
 
 #define cpu_relax() barrier()
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 5de13307b703..4cdaefec1b7c 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -31,6 +31,7 @@ struct cpu_info {
 };
 
 static DEFINE_PER_CPU(struct cpu_info, cpu_info);
+static DEFINE_PER_CPU(int, cpu_relax_retry);
 
 static bool machine_has_cpu_mhz;
 
@@ -58,13 +59,19 @@ void s390_update_cpu_mhz(void)
 		on_each_cpu(update_cpu_mhz, NULL, 0);
 }
 
-void notrace cpu_relax_yield(void)
+void notrace cpu_relax_yield(const struct cpumask *cpumask)
 {
-	if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) {
-		diag_stat_inc(DIAG_STAT_X044);
-		asm volatile("diag 0,0,0x44");
+	int cpu, this_cpu;
+
+	this_cpu = smp_processor_id();
+	if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
+		__this_cpu_write(cpu_relax_retry, 0);
+		cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
+		if (cpu >= nr_cpu_ids)
+			return;
+		if (arch_vcpu_is_preempted(cpu))
+			smp_yield_cpu(cpu);
 	}
-	barrier();
 }
 EXPORT_SYMBOL(cpu_relax_yield);
 
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f00955940694..44974654cbd0 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -414,7 +414,7 @@ void smp_yield_cpu(int cpu)
 		diag_stat_inc_norecursion(DIAG_STAT_X09C);
 		asm volatile("diag %0,0,0x9c"
 			     : : "d" (pcpu_devices[cpu].address));
-	} else if (MACHINE_HAS_DIAG44) {
+	} else if (MACHINE_HAS_DIAG44 && !smp_cpu_mtid) {
 		diag_stat_inc_norecursion(DIAG_STAT_X044);
 		asm volatile("diag 0,0,0x44");
 	}
```
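
A note on the helper picked up from Peter Zijlstra's suggestion: cpumask_next_wrap(n, mask, start, wrap) returns the next CPU set in mask after n, wrapping around at the end of the mask, and returns a value >= nr_cpu_ids once it arrives back at start without finding another CPU. Called with n == start == this_cpu and wrap == false, as in the new cpu_relax_yield(), it therefore yields the next CPU of the stop_machine mask after the current one, or a value >= nr_cpu_ids if the current CPU is the only one in the mask, which is what the "cpu >= nr_cpu_ids" early return handles. The snippet below is a hypothetical debugging helper, not part of the patch, that prints the resulting yield targets; it only uses helpers the patch itself relies on.

```c
/*
 * Hypothetical debugging helper (not part of the patch): show which CPU each
 * member of a stop_machine cpumask would donate its time slice to.
 */
#include <linux/cpumask.h>
#include <linux/printk.h>

static void dump_yield_ring(const struct cpumask *cpumask)
{
	int cpu, target;

	for_each_cpu(cpu, cpumask) {
		target = cpumask_next_wrap(cpu, cpumask, cpu, false);
		if (target >= nr_cpu_ids)
			pr_info("CPU %d: no other CPU in the mask to yield to\n", cpu);
		else
			pr_info("CPU %d: would yield to CPU %d\n", cpu, target);
	}
}
```

For a mask covering, say, CPUs {1, 4, 6}, the hand-off forms a ring: 1 yields to 4, 4 yields to 6, and 6 wraps around and yields to 1, so a small number of real CPUs backing the mask gets passed around round-robin, exactly as the commit message describes.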