[Patch v3 2/6] sched/topology: Record number of cores in sched group

From: Tim C Chen
Date: Wed May 17 2023 - 12:09:54 EST


When balancing sibling domains that have different number of cores,
tasks in respective sibling domain should be proportional to the number
of cores in each domain. In preparation of implementing such a policy,
record the number of cores in a scheduling group.

Signed-off-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
---
kernel/sched/sched.h | 1 +
kernel/sched/topology.c | 21 +++++++++++++++++++++
2 files changed, 22 insertions(+)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3d0eb36350d2..5f7f36e45b87 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1860,6 +1860,7 @@ struct sched_group {
atomic_t ref;
 
unsigned int group_weight;
+ unsigned int cores;
struct sched_group_capacity *sgc;
int asym_prefer_cpu; /* CPU of highest priority in group */
int flags;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 6d5628fcebcf..4ecdaef3f8ab 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1262,6 +1262,26 @@ build_sched_groups(struct sched_domain *sd, int cpu)
return 0;
}
 
+#ifdef CONFIG_SCHED_SMT
+static inline int sched_group_cores(struct sched_group *sg)
+{
+ struct cpumask *mask = sched_domains_tmpmask2;
+ int cpu, cores = 0;
+
+ cpumask_copy(mask, sched_group_span(sg));
+ for_each_cpu(cpu, mask) {
+ cores++;
+ cpumask_andnot(mask, mask, cpu_smt_mask(cpu));
+ }
+ return cores;
+}
+#else
+static inline int sched_group_cores(struct sched_group *sg)
+{
+ return sg->group_weight;
+}
+#endif
+
/*
* Initialize sched groups cpu_capacity.
*
@@ -1282,6 +1302,7 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
int cpu, max_cpu = -1;
 
sg->group_weight = cpumask_weight(sched_group_span(sg));
+ sg->cores = sched_group_cores(sg);
 
if (!(sd->flags & SD_ASYM_PACKING))
goto next;
-- 
2.32.0