Re: [PATCH V3 1/2] topology: Allow multiple entities to provide sched_freq_tick() callback

From: Viresh Kumar
Date: Fri Feb 05 2021 - 04:18:44 EST


On 03-02-21, 11:45, Ionela Voinescu wrote:
> Therefore, I think system level invariance management (checks and
> call to rebuild_sched_domains_energy()) also needs to move from arm64
> code to arch_topology code.

Here is the 3rd patch of this series then :)

From: Viresh Kumar <viresh.kumar@xxxxxxxxxx>
Date: Fri, 5 Feb 2021 13:31:53 +0530
Subject: [PATCH] drivers: arch_topology: rebuild sched domains on invariance
change

We already do this for arm64; move it to arch_topology.c, as all
sched_freq_tick sources are now managed there.

Reported-by: Ionela Voinescu <ionela.voinescu@xxxxxxx>
Signed-off-by: Viresh Kumar <viresh.kumar@xxxxxxxxxx>
---
arch/arm64/kernel/topology.c | 16 ----------------
drivers/base/arch_topology.c | 22 ++++++++++++++++++++++
2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 1e47dfd465f8..47fca7376c93 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -240,7 +240,6 @@ static struct scale_freq_data amu_sfd = {

static void amu_fie_setup(const struct cpumask *cpus)
{
- bool invariant;
int cpu;

/* We are already set since the last insmod of cpufreq driver */
@@ -257,25 +256,10 @@ static void amu_fie_setup(const struct cpumask *cpus)

cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);

- invariant = topology_scale_freq_invariant();
-
- /* We aren't fully invariant yet */
- if (!invariant && !cpumask_equal(amu_fie_cpus, cpu_present_mask))
- return;
-
topology_set_scale_freq_source(&amu_sfd, amu_fie_cpus);

pr_debug("CPUs[%*pbl]: counters will be used for FIE.",
cpumask_pr_args(cpus));
-
- /*
- * Task scheduler behavior depends on frequency invariance support,
- * either cpufreq or counter driven. If the support status changes as
- * a result of counter initialisation and use, retrigger the build of
- * scheduling domains to ensure the information is propagated properly.
- */
- if (!invariant)
- rebuild_sched_domains_energy();
}

static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val,
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 20b511949cd8..3631877f4440 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -23,6 +23,7 @@

static DEFINE_PER_CPU(struct scale_freq_data *, sft_data);
static struct cpumask scale_freq_counters_mask;
+static bool scale_freq_invariant;

static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
@@ -35,6 +36,23 @@ bool topology_scale_freq_invariant(void)
supports_scale_freq_counters(cpu_online_mask);
}

+static void update_scale_freq_invariant(bool status)
+{
+ if (scale_freq_invariant == status)
+ return;
+
+ /*
+ * Task scheduler behavior depends on frequency invariance support,
+ * either cpufreq or counter driven. If the support status changes as
+ * a result of counter initialisation and use, retrigger the build of
+ * scheduling domains to ensure the information is propagated properly.
+ */
+ if (topology_scale_freq_invariant() == status) {
+ scale_freq_invariant = status;
+ rebuild_sched_domains_energy();
+ }
+}
+
void topology_set_scale_freq_source(struct scale_freq_data *data,
const struct cpumask *cpus)
{
@@ -50,6 +68,8 @@ void topology_set_scale_freq_source(struct scale_freq_data *data,
cpumask_set_cpu(cpu, &scale_freq_counters_mask);
}
}
+
+ update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);

@@ -67,6 +87,8 @@ void topology_clear_scale_freq_source(enum scale_freq_source source,
cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
}
}
+
+ update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);

--
2.25.0.rc1.19.g042ed3e048af

--
viresh