author     Peter Zijlstra <peterz@infradead.org>  2017-04-14 17:32:07 +0200
committer  Ingo Molnar <mingo@kernel.org>         2017-05-15 10:15:24 +0200
commit     91eaed0d61319f58a9f8e43d41a8cbb069b4f73d (patch)
tree       1e2e6ef534609f5bed4cbdcce180ed51b7bd6e00 /kernel
parent     0372dd2736e02672ac6e189c31f7d8c02ad543cd (diff)
download   linux-91eaed0d61319f58a9f8e43d41a8cbb069b4f73d.tar.gz
           linux-91eaed0d61319f58a9f8e43d41a8cbb069b4f73d.tar.bz2
           linux-91eaed0d61319f58a9f8e43d41a8cbb069b4f73d.zip
sched/topology: Simplify build_overlap_sched_groups()
Now that the first group will always be the previous domain of this @cpu, this can be simplified. In fact, writing the code now removed should've been a big clue I was doing it wrong :/

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
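For readers who want to see the list invariant in isolation, below is a minimal, self-contained C sketch (illustration only, not the author's code). The types and names in it -- struct group, struct domain, balance_cpu, build_groups -- are hypothetical stand-ins, not the kernel's struct sched_group / struct sched_domain API; only the first/last/last->next list handling mirrors the diff. It demonstrates the point the commit message makes: when the group covering @cpu is built first, it is already the head of the circular list, so the separate search for a head pointer ('groups') that this patch removes is redundant.

    /*
     * Illustration only -- not kernel code. "struct group", "struct domain"
     * and "balance_cpu" are simplified stand-ins invented for this sketch;
     * they are not struct sched_group / struct sched_domain.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct group {
            int balance_cpu;        /* CPU this group is balanced from */
            struct group *next;     /* circular singly-linked list */
    };

    struct domain {
            struct group *groups;   /* head of the circular group list */
    };

    /*
     * Build a circular list of groups, mirroring the first/last/last->next
     * pattern in the patch. Because construction starts from @cpu's own
     * domain, the first group appended is the one covering @cpu, so the
     * head can simply be 'first'.
     */
    static int build_groups(struct domain *d, const int *balance_cpus, int n, int cpu)
    {
            struct group *first = NULL, *last = NULL;
            int i;

            for (i = 0; i < n; i++) {
                    struct group *sg = calloc(1, sizeof(*sg));

                    if (!sg)
                            return -1;
                    sg->balance_cpu = balance_cpus[i];

                    if (!first)
                            first = sg;
                    if (last)
                            last->next = sg;
                    last = sg;
                    last->next = first;     /* keep the list circular at every step */
            }

            /* The invariant the patch relies on: @cpu's group is already the head. */
            if (first && first->balance_cpu != cpu)
                    return -1;

            d->groups = first;
            return 0;
    }

    int main(void)
    {
            /* balance_cpus[0] == 0 models "the first group built covers this @cpu". */
            int balance_cpus[] = { 0, 2, 4 };
            struct domain d = { NULL };
            struct group *sg;

            if (build_groups(&d, balance_cpus, 3, 0))
                    return 1;

            /* Walk the circular list exactly once, starting at the head. */
            sg = d.groups;
            do {
                    printf("group balanced from CPU %d\n", sg->balance_cpu);
                    sg = sg->next;
            } while (sg != d.groups);

            /* Tear down: free the tail nodes, then the head. */
            sg = d.groups->next;
            while (sg != d.groups) {
                    struct group *next = sg->next;

                    free(sg);
                    sg = next;
            }
            free(d.groups);

            return 0;
    }

Building this with a plain C compiler (e.g. gcc -Wall sketch.c) and running it prints the three toy groups starting from the one balanced from CPU 0, i.e. the group that ends up at the head without any extra selection logic.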
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/topology.c | 13
1 file changed, 2 insertions, 11 deletions
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 921dedde2ee1..6b10e0a956c7 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -557,7 +557,7 @@ static void init_overlap_sched_group(struct sched_domain *sd,
 static int
 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 {
-        struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+        struct sched_group *first = NULL, *last = NULL, *sg;
         const struct cpumask *span = sched_domain_span(sd);
         struct cpumask *covered = sched_domains_tmpmask;
         struct sd_data *sdd = sd->private;
@@ -587,15 +587,6 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
                 init_overlap_sched_group(sd, sg, i);
 
-                /*
-                 * Make sure the first group of this domain contains the
-                 * canonical balance CPU. Otherwise the sched_domain iteration
-                 * breaks. See update_sg_lb_stats().
-                 */
-                if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
-                    group_balance_cpu(sg) == cpu)
-                        groups = sg;
-
                 if (!first)
                         first = sg;
                 if (last)
@@ -603,7 +594,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                 last = sg;
                 last->next = first;
         }
-        sd->groups = groups;
+        sd->groups = first;
 
         return 0;