Re: [PATCH v2 5/7] x86/sched: Remove SD_ASYM_PACKING from the "SMT" domain

From: Valentin Schneider
Date: Thu Dec 22 2022 - 11:57:47 EST


On 19/12/22 16:42, Ricardo Neri wrote:
> On Thu, Dec 15, 2022 at 04:48:14PM +0000, Valentin Schneider wrote:
>> With that said, so far all but one flag (SD_PREFER_SIBLING, and that's
>> because of big.LITTLE woes) follow the SDF_SHARED_{CHILD, PARENT} pattern;
>> if SD_ASYM_PACKING no longer does, then we need to think about whether
>> we're trying to make it do funky things.
>
> My thesis is that x86 does not need the SD_ASYM_PACKING flag at the SMT
> level because all SMT siblings are identical. There are cores of higher
> priority at the "MC" level (maybe in the future at the "CLS" level).
>
> Power7 is fine because it only uses SD_ASYM_PACKING at the SMT level.
>

So from what I grokked of your series, I agree with you: x86 shouldn't
need it at the SMT level.
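
If I'm reading the x86 side of your series right, the end state is roughly
the below (my sketch, using the current arch/x86/kernel/smpboot.c naming;
#ifdef CONFIG_SCHED_* guards omitted, and only the SMT level loses the
ITMT flag):

  static int x86_sched_itmt_flags(void)
  {
          return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
  }

  /* SMT siblings are identical, so no asym packing below the core. */
  static int x86_smt_flags(void)
  {
          return cpu_smt_flags();
  }

  /* Cores within a package can have different (ITMT) priorities. */
  static int x86_core_flags(void)
  {
          return cpu_core_flags() | x86_sched_itmt_flags();
  }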

What about the below?

---

diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
index 57bde66d95f7a..8dc16942135b4 100644
--- a/include/linux/sched/sd_flags.h
+++ b/include/linux/sched/sd_flags.h
@@ -132,12 +132,12 @@ SD_FLAG(SD_SERIALIZE, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
 /*
  * Place busy tasks earlier in the domain
  *
- * SHARED_CHILD: Usually set on the SMT level. Technically could be set further
- *               up, but currently assumed to be set from the base domain
- *               upwards (see update_top_cache_domain()).
+ * SHARED_PARENT: Usually set on the SMT level. Can be set further up if all
+ *                siblings of an SMT core are identical, but SMT cores themselves
+ *                have different priorities.
  * NEEDS_GROUPS: Load balancing flag.
  */
-SD_FLAG(SD_ASYM_PACKING, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+SD_FLAG(SD_ASYM_PACKING, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)

 /*
  * Prefer to place tasks in a sibling domain
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b1d338a740e56..2d532e29373b1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1772,6 +1772,19 @@ queue_balance_callback(struct rq *rq,
 	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
 			__sd; __sd = __sd->parent)

+static inline struct sched_domain *__highest_flag_domain(struct sched_domain *sd, int flag)
+{
+	struct sched_domain *hsd = NULL;
+
+	for (; sd; sd = sd->parent) {
+		if (!(sd->flags & flag))
+			break;
+		hsd = sd;
+	}
+
+	return hsd;
+}
+
 /**
  * highest_flag_domain - Return highest sched_domain containing flag.
  * @cpu:	The CPU whose highest level of sched domain is to
@@ -1783,15 +1796,7 @@ queue_balance_callback(struct rq *rq,
  */
 static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
 {
-	struct sched_domain *sd, *hsd = NULL;
-
-	for_each_domain(cpu, sd) {
-		if (!(sd->flags & flag))
-			break;
-		hsd = sd;
-	}
-
-	return hsd;
+	return __highest_flag_domain(rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd), flag);
 }

 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
@@ -1806,6 +1811,16 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
 	return sd;
 }

+static inline struct sched_domain *highest_parent_flag_domain(int cpu, int flag)
+{
+	struct sched_domain *sd;
+
+	SCHED_WARN_ON(!(sd_flag_debug[ilog2(flag)].meta_flags & SDF_SHARED_PARENT));
+
+	sd = lowest_flag_domain(cpu, flag);
+	return __highest_flag_domain(sd, flag);
+}
+
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8154ef590b9f8..4e0e5b27c331b 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -692,7 +692,7 @@ static void update_top_cache_domain(int cpu)
 	sd = lowest_flag_domain(cpu, SD_NUMA);
 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

-	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
+	sd = highest_parent_flag_domain(cpu, SD_ASYM_PACKING);
 	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);

 	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL);
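
To convince myself the new helper does the right thing, I hacked up the
below userspace toy (not kernel code; mock types and made-up level names)
of the two walks on a hierarchy where only MC carries SD_ASYM_PACKING,
which is the hybrid x86 case we care about:

  #include <stdio.h>
  #include <stddef.h>

  #define SD_ASYM_PACKING 0x1

  struct sched_domain {
          int flags;
          const char *name;
          struct sched_domain *parent;
  };

  /* Mirrors lowest_flag_domain(): first domain, bottom-up, with the flag. */
  static struct sched_domain *toy_lowest_flag_domain(struct sched_domain *sd, int flag)
  {
          for (; sd; sd = sd->parent)
                  if (sd->flags & flag)
                          return sd;
          return NULL;
  }

  /* Mirrors __highest_flag_domain(): walk up while the flag stays set. */
  static struct sched_domain *toy_highest_flag_domain(struct sched_domain *sd, int flag)
  {
          struct sched_domain *hsd = NULL;

          for (; sd; sd = sd->parent) {
                  if (!(sd->flags & flag))
                          break;
                  hsd = sd;
          }
          return hsd;
  }

  int main(void)
  {
          /* SMT -> MC -> PKG; only MC has SD_ASYM_PACKING. */
          struct sched_domain pkg = { 0, "PKG", NULL };
          struct sched_domain mc  = { SD_ASYM_PACKING, "MC", &pkg };
          struct sched_domain smt = { 0, "SMT", &mc };
          struct sched_domain *sd;

          sd = toy_lowest_flag_domain(&smt, SD_ASYM_PACKING); /* -> MC */
          sd = toy_highest_flag_domain(sd, SD_ASYM_PACKING);  /* stays at MC */
          printf("sd_asym_packing -> %s\n", sd ? sd->name : "(none)");
          return 0;
  }

i.e. the lowest_flag_domain() step skips the flagless SMT level, and the
upward walk stops before PKG, so sd_asym_packing ends up pointing at MC
rather than at NULL, which is what the old SHARED_CHILD-based walk (break
on the very first flagless level) would have given us here.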