[PATCH 10/31] sched: Add @reason to sched_class->rq_{on|off}line()

From: Tejun Heo
Date: Wed Nov 30 2022 - 03:25:04 EST


->rq_{on|off}line() are called either during CPU hotplug or cpuset
partition updates which rebuild the sched domain topology. Let's add a
@reason argument to distinguish the two cases. The argument will be used
by a new sched_class to track CPU hotplug events.

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
Reviewed-by: David Vernet <dvernet@xxxxxxxx>
Acked-by: Josh Don <joshdon@xxxxxxxxxx>
Acked-by: Hao Luo <haoluo@xxxxxxxxxx>
Acked-by: Barret Rhoden <brho@xxxxxxxxxx>
---
 kernel/sched/core.c     | 12 ++++++------
 kernel/sched/deadline.c |  4 ++--
 kernel/sched/fair.c     |  4 ++--
 kernel/sched/rt.c       |  4 ++--
 kernel/sched/sched.h    | 13 +++++++++----
 kernel/sched/topology.c |  4 ++--
 6 files changed, 23 insertions(+), 18 deletions(-)
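
As an illustration only (none of this is in the patch): a sched_class
implementation that wants to react to physical CPU hotplug, but not to
the rq_{on,off}line() calls triggered by sched domain rebuilds, could
now filter on @reason along these lines. The callback name below is
made up; only enum rq_onoff_reason and the callback signature come
from this patch.

/* hypothetical consumer of @reason, not part of this patch */
static void rq_online_example(struct rq *rq, enum rq_onoff_reason reason)
{
	/*
	 * rq_online() also fires on cpuset/topology updates; only
	 * RQ_ONOFF_HOTPLUG means rq->cpu is actually coming online.
	 */
	if (reason != RQ_ONOFF_HOTPLUG)
		return;

	/* ... track the hotplug event for rq->cpu here ... */
}

Both paths funnel through set_rq_{on,off}line(), so without @reason a
class has no way to tell a domain rebuild apart from a real hotplug
event.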

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 735e94bc7dbb..0699b49b1a21 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9286,7 +9286,7 @@ static inline void balance_hotplug_wait(void)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-void set_rq_online(struct rq *rq)
+void set_rq_online(struct rq *rq, enum rq_onoff_reason reason)
 {
 	if (!rq->online) {
 		const struct sched_class *class;
@@ -9296,19 +9296,19 @@ void set_rq_online(struct rq *rq)
 
 		for_each_class(class) {
 			if (class->rq_online)
-				class->rq_online(rq);
+				class->rq_online(rq, reason);
 		}
 	}
 }
 
-void set_rq_offline(struct rq *rq)
+void set_rq_offline(struct rq *rq, enum rq_onoff_reason reason)
 {
 	if (rq->online) {
 		const struct sched_class *class;
 
 		for_each_class(class) {
 			if (class->rq_offline)
-				class->rq_offline(rq);
+				class->rq_offline(rq, reason);
 		}
 
 		cpumask_clear_cpu(rq->cpu, rq->rd->online);
@@ -9404,7 +9404,7 @@ int sched_cpu_activate(unsigned int cpu)
 	rq_lock_irqsave(rq, &rf);
 	if (rq->rd) {
 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_online(rq);
+		set_rq_online(rq, RQ_ONOFF_HOTPLUG);
 	}
 	rq_unlock_irqrestore(rq, &rf);
 
@@ -9449,7 +9449,7 @@ int sched_cpu_deactivate(unsigned int cpu)
 	if (rq->rd) {
 		update_rq_clock(rq);
 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_offline(rq);
+		set_rq_offline(rq, RQ_ONOFF_HOTPLUG);
 	}
 	rq_unlock_irqrestore(rq, &rf);
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 9ae8f41e3372..f63e5d0c5fb1 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2519,7 +2519,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 }
 
 /* Assumes rq->lock is held */
-static void rq_online_dl(struct rq *rq)
+static void rq_online_dl(struct rq *rq, enum rq_onoff_reason reason)
 {
 	if (rq->dl.overloaded)
 		dl_set_overload(rq);
@@ -2530,7 +2530,7 @@ static void rq_online_dl(struct rq *rq)
 }
 
 /* Assumes rq->lock is held */
-static void rq_offline_dl(struct rq *rq)
+static void rq_offline_dl(struct rq *rq, enum rq_onoff_reason reason)
 {
 	if (rq->dl.overloaded)
 		dl_clear_overload(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 78263cef1ea8..6cad7d07186b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11436,14 +11436,14 @@ void trigger_load_balance(struct rq *rq)
 	nohz_balancer_kick(rq);
 }
 
-static void rq_online_fair(struct rq *rq)
+static void rq_online_fair(struct rq *rq, enum rq_onoff_reason reason)
 {
 	update_sysctl();
 
 	update_runtime_enabled(rq);
 }
 
-static void rq_offline_fair(struct rq *rq)
+static void rq_offline_fair(struct rq *rq, enum rq_onoff_reason reason)
 {
 	update_sysctl();
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ed2a47e4ddae..0fb7ee087669 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2470,7 +2470,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 }
 
 /* Assumes rq->lock is held */
-static void rq_online_rt(struct rq *rq)
+static void rq_online_rt(struct rq *rq, enum rq_onoff_reason reason)
 {
 	if (rq->rt.overloaded)
 		rt_set_overload(rq);
@@ -2481,7 +2481,7 @@ static void rq_online_rt(struct rq *rq)
 }
 
 /* Assumes rq->lock is held */
-static void rq_offline_rt(struct rq *rq)
+static void rq_offline_rt(struct rq *rq, enum rq_onoff_reason reason)
 {
 	if (rq->rt.overloaded)
 		rt_clear_overload(rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 157eeabca5db..3c6ea8296ae4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2169,6 +2169,11 @@ extern const u32 sched_prio_to_wmult[40];
 
 #define RETRY_TASK		((void *)-1UL)
 
+enum rq_onoff_reason {
+	RQ_ONOFF_HOTPLUG,		/* CPU is going on/offline */
+	RQ_ONOFF_TOPOLOGY,		/* sched domain topology update */
+};
+
 struct sched_class {
 
 #ifdef CONFIG_UCLAMP_TASK
@@ -2201,8 +2206,8 @@ struct sched_class {
 				 const struct cpumask *newmask,
				 u32 flags);
 
-	void (*rq_online)(struct rq *rq);
-	void (*rq_offline)(struct rq *rq);
+	void (*rq_online)(struct rq *rq, enum rq_onoff_reason reason);
+	void (*rq_offline)(struct rq *rq, enum rq_onoff_reason reason);
 
 	struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
 #endif
@@ -2726,8 +2731,8 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 		raw_spin_rq_unlock(rq1);
 }
 
-extern void set_rq_online (struct rq *rq);
-extern void set_rq_offline(struct rq *rq);
+extern void set_rq_online (struct rq *rq, enum rq_onoff_reason reason);
+extern void set_rq_offline(struct rq *rq, enum rq_onoff_reason reason);
 extern bool sched_smp_initialized;
 
 #else /* CONFIG_SMP */
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8739c2a5a54e..0e859bea1cb6 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -493,7 +493,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	old_rd = rq->rd;
 
 	if (cpumask_test_cpu(rq->cpu, old_rd->online))
-		set_rq_offline(rq);
+		set_rq_offline(rq, RQ_ONOFF_TOPOLOGY);
 
 	cpumask_clear_cpu(rq->cpu, old_rd->span);
 
@@ -511,7 +511,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
 
 	cpumask_set_cpu(rq->cpu, rd->span);
 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
-		set_rq_online(rq);
+		set_rq_online(rq, RQ_ONOFF_TOPOLOGY);
 
 	raw_spin_rq_unlock_irqrestore(rq, flags);
 
--
2.38.1