author | Thomas Gleixner <tglx@linutronix.de> | 2016-03-10 12:54:17 +0100
---|---|---
committer | Thomas Gleixner <tglx@linutronix.de> | 2016-05-06 14:58:25 +0200
commit | 7d97669933eb94245ec9b715753753ec5ca8f646 (patch) |
tree | 2bd611a9679130a468c3782ce108a1be262f13bc /kernel/sched |
parent | e9cd8fa4fcfd67c95db9b87c0fff88fa23cb00e5 (diff) |
sched/migration: Move CPU_ONLINE into scheduler state
The alleged requirement that the migration notifier must have a lower priority than perf is completely undocumented, and there is no indication at all that it is true. perf does not even handle the CPU_ONLINE notification, and perf has nothing to do with migration.
Move the CPU_ONLINE code into the sched_cpu_activate() state callback.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160310120025.421743581@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
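For context, sched_cpu_activate() is not a notifier callback; the CPU hotplug core invokes it when a CPU crosses the corresponding hotplug state during bring-up, at the point where CPU_ONLINE used to fire. A sketch of how this is wired in later mainline kernels' kernel/cpu.c state table follows; the table and field names have shifted across releases, so treat it as illustrative of the mechanism rather than the exact tree at this commit:

```c
/*
 * Illustrative hotplug state-table entry (later mainline form).
 * The hotplug core calls .startup.single as the CPU comes online
 * and .teardown.single as it goes down, replacing the CPU_ONLINE
 * notifier removed by this patch.
 */
[CPUHP_AP_ACTIVE] = {
	.name			= "sched:active",
	.startup.single		= sched_cpu_activate,
	.teardown.single	= sched_cpu_deactivate,
},
```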
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/core.c | 33
1 file changed, 22 insertions(+), 11 deletions(-)
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 688e8a83208c..8d8d9034edff 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5424,17 +5424,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 
-	case CPU_ONLINE:
-		/* Update our root-domain */
-		raw_spin_lock_irqsave(&rq->lock, flags);
-		if (rq->rd) {
-			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-
-			set_rq_online(rq);
-		}
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
-		break;
-
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DYING:
 		sched_ttwu_pending();
@@ -7090,12 +7079,34 @@ static int cpuset_cpu_inactive(unsigned int cpu)
 
 int sched_cpu_activate(unsigned int cpu)
 {
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
 	set_cpu_active(cpu, true);
 
 	if (sched_smp_initialized) {
 		sched_domains_numa_masks_set(cpu);
 		cpuset_cpu_active();
 	}
+
+	/*
+	 * Put the rq online, if not already. This happens:
+	 *
+	 * 1) In the early boot process, because we build the real domains
+	 *    after all cpus have been brought up.
+	 *
+	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
+	 *    domains.
+	 */
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_online(rq);
+	}
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	update_max_interval();
+
 	return 0;
 }
```
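To see the moved logic in isolation, here is a minimal standalone userspace model of the pattern the second hunk adds: take the runqueue lock, check that a root domain exists and that the CPU sits in its span, then mark the runqueue online. The kernel names (struct rq, root_domain, set_rq_online) are re-declared as plain C stand-ins and a pthread mutex models the raw spinlock, so this is a sketch of the pattern, not kernel code.

```c
/*
 * Minimal userspace model of the logic this patch moves into
 * sched_cpu_activate(). Build with: gcc -pthread model.c
 */
#define _GNU_SOURCE
#include <assert.h>
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

struct root_domain {
	cpu_set_t span;		/* CPUs covered by this root domain */
};

struct rq {
	pthread_mutex_t lock;	/* stand-in for the rq->lock raw spinlock */
	struct root_domain *rd;	/* NULL until the sched domains are built */
	bool online;
};

static void set_rq_online(struct rq *rq)
{
	rq->online = true;
}

/* The former CPU_ONLINE notifier body, now run from the state callback. */
static int sched_cpu_activate_model(struct rq *rq, int cpu)
{
	pthread_mutex_lock(&rq->lock);
	if (rq->rd) {
		/* A CPU being activated must already be in the rd span. */
		assert(CPU_ISSET(cpu, &rq->rd->span));
		set_rq_online(rq);
	}
	pthread_mutex_unlock(&rq->lock);
	return 0;
}

int main(void)
{
	struct root_domain rd;
	struct rq rq = { .rd = &rd, .online = false };

	pthread_mutex_init(&rq.lock, NULL);
	CPU_ZERO(&rd.span);
	CPU_SET(1, &rd.span);	/* pretend CPU 1 is in the root domain */

	sched_cpu_activate_model(&rq, 1);
	printf("rq online: %s\n", rq.online ? "yes" : "no");

	pthread_mutex_destroy(&rq.lock);
	return 0;
}
```

The rq->rd NULL check mirrors the early-boot case described in the comment the patch adds: before the real scheduler domains are built there is no root domain yet, so the rq is put online later, once the domains are attached.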