author    Barry Song <song.bao.hua@hisilicon.com>    2023-10-19 11:33:21 +0800
committer Peter Zijlstra <peterz@infradead.org>      2023-10-24 10:38:42 +0200
commit    b95303e0aeaf446b65169dd4142cacdaeb7d4c8b
tree      448ae9f3ea16e3d4593686bc929c059a8b8bd9f0
parent    5ebde09d91707a4a9bec1e3d213e3c12ffde348f
sched: Add cpus_share_resources API
Add the cpus_share_resources() API. This is preparation for optimizing select_idle_cpu() on platforms with a cluster scheduler level.

On a machine with clusters, cpus_share_resources() tests whether two CPUs are within the same cluster. On a non-cluster machine it behaves the same as cpus_share_cache(), hence the name "resources" for shared cache resources.

Signed-off-by: Barry Song <song.bao.hua@hisilicon.com>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Tested-and-reviewed-by: Chen Yu <yu.c.chen@intel.com>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lkml.kernel.org/r/20231019033323.54147-2-yangyicong@huawei.com
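Note that the diffstat below is limited to kernel/sched/topology.c, so the cpus_share_resources() entry point itself is not shown here; it is added elsewhere in the patch (in kernel/sched/core.c in the series). Given the per-CPU sd_share_id introduced below, a minimal sketch of the API, mirroring the existing cpus_share_cache(), would be:

bool cpus_share_resources(int this_cpu, int that_cpu)
{
	/* A CPU trivially shares resources with itself. */
	if (this_cpu == that_cpu)
		return true;

	/*
	 * sd_share_id is the cluster id on cluster machines and falls
	 * back to the LLC id otherwise, so this degenerates to
	 * cpus_share_cache() on non-cluster hardware.
	 */
	return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
}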
Diffstat (limited to 'kernel/sched/topology.c')
-rw-r--r--  kernel/sched/topology.c  13
1 file changed, 13 insertions, 0 deletions
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index a63729f87c21..dbb8c328e8ad 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -668,6 +668,7 @@ static void destroy_sched_domains(struct sched_domain *sd)
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
+DEFINE_PER_CPU(int, sd_share_id);
DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
@@ -693,6 +694,17 @@ static void update_top_cache_domain(int cpu)
per_cpu(sd_llc_id, cpu) = id;
rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
+ sd = lowest_flag_domain(cpu, SD_CLUSTER);
+ if (sd)
+ id = cpumask_first(sched_domain_span(sd));
+
+ /*
+ * This assignment should be placed after sd_llc_id, as we want
+ * this id to equal the cluster id on cluster machines but the
+ * LLC id on non-cluster machines.
+ */
+ per_cpu(sd_share_id, cpu) = id;
+
sd = lowest_flag_domain(cpu, SD_NUMA);
rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
@@ -1550,6 +1562,7 @@ static struct cpumask ***sched_domains_numa_masks;
*/
#define TOPOLOGY_SD_FLAGS \
(SD_SHARE_CPUCAPACITY | \
+ SD_CLUSTER | \
SD_SHARE_PKG_RESOURCES | \
SD_NUMA | \
SD_ASYM_PACKING)
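For context, a hypothetical caller (not part of this patch) shows how the select_idle_cpu() optimization mentioned in the changelog might consume the new API; the function name and placement here are illustrative assumptions:

/*
 * Illustrative only: scan the task's allowed CPUs starting from
 * @target and prefer an idle CPU that shares resources with it
 * (same cluster, or same LLC on non-cluster machines).
 */
static int pick_idle_cpu_sharing_resources(struct task_struct *p, int target)
{
	int cpu;

	for_each_cpu_wrap(cpu, p->cpus_ptr, target) {
		if (cpus_share_resources(cpu, target) && available_idle_cpu(cpu))
			return cpu;
	}

	return target;
}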