| author | Ben Hutchings <ben@decadent.org.uk> | 2019-05-10 00:46:25 +0100 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2019-05-16 19:45:11 +0200 |
| commit | 1f562beba75c9da65461debbdb5d8d2871ab38a1 (patch) | |
| tree | 55b8dcb09e8ee6882de22c9fa7abf59d3e8ec6e5 /kernel | |
| parent | f576a78075bae5799b6c29f7e7548003e89361da (diff) | |

sched: Add sched_smt_active()
Add the sched_smt_active() function needed for some x86 speculation
mitigations. This was introduced upstream by commits 1b568f0aabf2
"sched/core: Optimize SCHED_SMT", ba2591a5993e "sched/smt: Update
sched_smt_present at runtime", c5511d03ec09 "sched/smt: Make
sched_smt_present track topology", and 321a874a7ef8 "sched/smt: Expose
sched_smt_present static key". The upstream implementation uses the
static_key_{disable,enable}_cpuslocked() functions, which aren't
practical to backport.
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
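The backport keeps the upstream API: a sched_smt_active() helper that reports whether any online core has SMT siblings, backed by the atomic counter this patch adds in kernel/sched/core.c (see the diff below) instead of a static key. The header side of the change is not shown here because the diffstat is limited to 'kernel'; the sketch below is therefore only an illustration of what <linux/sched/smt.h> would plausibly contain, not a quote from the patch (the include-guard name and comments are assumed).

```c
/*
 * Illustrative sketch of include/linux/sched/smt.h for this backport.
 * Not taken from the diff shown on this page; details may differ.
 */
#ifndef _LINUX_SCHED_SMT_H
#define _LINUX_SCHED_SMT_H

#include <linux/types.h>
#include <linux/atomic.h>

#ifdef CONFIG_SCHED_SMT
/*
 * Count of online cores with SMT siblings, maintained by the CPU
 * hotplug notifiers in kernel/sched/core.c.
 */
extern atomic_t sched_smt_present;

static inline bool sched_smt_active(void)
{
	/* Non-zero while at least one online core has a sibling thread. */
	return atomic_read(&sched_smt_present);
}
#else
static inline bool sched_smt_active(void)
{
	return false;
}
#endif

#endif /* _LINUX_SCHED_SMT_H */
```

Compared with the upstream static key, this costs an ordinary atomic_read() load on every call, which is acceptable for the mitigation code that needs it and avoids backporting static_key_{disable,enable}_cpuslocked().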
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched/core.c  | 24 |
-rw-r--r-- | kernel/sched/sched.h |  1 |
2 files changed, 25 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d0618951014b..d35a7d528ea6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5610,6 +5610,10 @@ static void set_cpu_rq_start_time(void)
 	rq->age_stamp = sched_clock_cpu(cpu);
 }
 
+#ifdef CONFIG_SCHED_SMT
+atomic_t sched_smt_present = ATOMIC_INIT(0);
+#endif
+
 static int sched_cpu_active(struct notifier_block *nfb,
 				      unsigned long action, void *hcpu)
 {
@@ -5626,11 +5630,23 @@ static int sched_cpu_active(struct notifier_block *nfb,
 		 * set_cpu_online(). But it might not yet have marked itself
 		 * as active, which is essential from here on.
 		 */
+#ifdef CONFIG_SCHED_SMT
+		/*
+		 * When going up, increment the number of cores with SMT present.
+		 */
+		if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+			atomic_inc(&sched_smt_present);
+#endif
 		set_cpu_active(cpu, true);
 		stop_machine_unpark(cpu);
 		return NOTIFY_OK;
 
 	case CPU_DOWN_FAILED:
+#ifdef CONFIG_SCHED_SMT
+		/* Same as for CPU_ONLINE */
+		if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+			atomic_inc(&sched_smt_present);
+#endif
 		set_cpu_active(cpu, true);
 		return NOTIFY_OK;
 
@@ -5645,7 +5661,15 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
 		set_cpu_active((long)hcpu, false);
+#ifdef CONFIG_SCHED_SMT
+		/*
+		 * When going down, decrement the number of cores with SMT present.
+		 */
+		if (cpumask_weight(cpu_smt_mask((long)hcpu)) == 2)
+			atomic_dec(&sched_smt_present);
+#endif
 		return NOTIFY_OK;
+
 	default:
 		return NOTIFY_DONE;
 	}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6893ee31df4d..8b96df04ba78 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
+#include <linux/sched/smt.h>
 #include <linux/sched/deadline.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
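As a usage note: the x86 speculation mitigations mentioned in the commit message consult the helper at their decision points. The fragment below is purely hypothetical (the function name and messages are invented for illustration) and is not part of this patch; it only shows the calling convention.

```c
/* Hypothetical caller, for illustration only; not part of this patch. */
#include <linux/kernel.h>
#include <linux/sched/smt.h>

static void report_smt_dependent_mitigation(void)
{
	/* Cross-thread mitigations only matter while sibling threads exist. */
	if (sched_smt_active())
		pr_info("SMT active: cross-thread mitigations are relevant\n");
	else
		pr_info("SMT inactive: cross-thread mitigations are not needed\n");
}
```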