[tip:sched/core] sched/topology: Add partition_sched_domains_locked()

From: tip-bot for Mathieu Poirier
Date: Thu Jul 25 2019 - 14:08:15 EST


Commit-ID: c22645f4c8f021fb1c5e7189eb1f968132cc0844
Gitweb: https://git.kernel.org/tip/c22645f4c8f021fb1c5e7189eb1f968132cc0844
Author: Mathieu Poirier <mathieu.poirier@xxxxxxxxxx>
AuthorDate: Fri, 19 Jul 2019 15:59:53 +0200
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Thu, 25 Jul 2019 15:51:57 +0200

sched/topology: Add partition_sched_domains_locked()

Introduce partition_sched_domains_locked() by moving the
sched_domains_mutex locking out of the original function. That way
the domain-rebuild work done by partition_sched_domains() can be
reused by callers that already hold the mutex, without dropping it.

No change in functionality is introduced by this patch.

Tested-by: Dietmar Eggemann <dietmar.eggemann@xxxxxxx>
Signed-off-by: Mathieu Poirier <mathieu.poirier@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Acked-by: Tejun Heo <tj@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: bristot@xxxxxxxxxx
Cc: claudio@xxxxxxxxxxxxxxx
Cc: lizefan@xxxxxxxxxx
Cc: longman@xxxxxxxxxx
Cc: luca.abeni@xxxxxxxxxxxxxxx
Cc: rostedt@xxxxxxxxxxx
Cc: tommaso.cucinotta@xxxxxxxxxxxxxxx
Link: https://lkml.kernel.org/r/20190719140000.31694-2-juri.lelli@xxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
 include/linux/sched/topology.h | 10 ++++++++++
 kernel/sched/topology.c        | 17 +++++++++++++----
 2 files changed, 23 insertions(+), 4 deletions(-)
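
(Note, not part of the patch itself: a minimal sketch of how the new
entry point is meant to be reused. The helper name
example_rebuild_under_lock() is made up for illustration, and the
sketch assumes it sits in scheduler code where sched_domains_mutex is
visible.)

/*
 * Illustration only: a caller can take sched_domains_mutex once,
 * update whatever state must stay consistent with the new domains,
 * and rebuild them via the locked variant without dropping the
 * mutex in between.
 */
static void example_rebuild_under_lock(int ndoms, cpumask_var_t doms[],
				       struct sched_domain_attr *attrs)
{
	mutex_lock(&sched_domains_mutex);

	/* ... update related state under the same critical section ... */

	partition_sched_domains_locked(ndoms, doms, attrs);

	mutex_unlock(&sched_domains_mutex);
}

Callers that do not already hold the mutex keep using
partition_sched_domains(), which now simply wraps the locked variant.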

diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 7863bb62d2ab..f341163fedc9 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -150,6 +150,10 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
 	return to_cpumask(sd->span);
 }
 
+extern void partition_sched_domains_locked(int ndoms_new,
+					   cpumask_var_t doms_new[],
+					   struct sched_domain_attr *dattr_new);
+
 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 				    struct sched_domain_attr *dattr_new);

@@ -194,6 +198,12 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl);

 struct sched_domain_attr;
 
+static inline void
+partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
+			       struct sched_domain_attr *dattr_new)
+{
+}
+
 static inline void
 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			struct sched_domain_attr *dattr_new)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 4eea2c9bc732..5a174ae6ecf3 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2169,16 +2169,16 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * ndoms_new == 0 is a special case for destroying existing domains,
  * and it will not create the default domain.
  *
- * Call with hotplug lock held
+ * Call with hotplug lock and sched_domains_mutex held
  */
-void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
-			     struct sched_domain_attr *dattr_new)
+void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
+				    struct sched_domain_attr *dattr_new)
 {
 	bool __maybe_unused has_eas = false;
 	int i, j, n;
 	int new_topology;
 
-	mutex_lock(&sched_domains_mutex);
+	lockdep_assert_held(&sched_domains_mutex);
 
 	/* Always unregister in case we don't destroy any domains: */
 	unregister_sched_domain_sysctl();
@@ -2261,6 +2261,15 @@ match3:
 	ndoms_cur = ndoms_new;
 
 	register_sched_domain_sysctl();
+}
 
+/*
+ * Call with hotplug lock held
+ */
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+			     struct sched_domain_attr *dattr_new)
+{
+	mutex_lock(&sched_domains_mutex);
+	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
 	mutex_unlock(&sched_domains_mutex);
 }