[PATCH 2/2] cpu-hotplug: Always use real-time scheduling when hotplugging a CPU

From: Prasad Sodagudi
Date: Wed Sep 23 2020 - 19:38:24 EST


From: Syed Rameez Mustafa <rameezmustafa@xxxxxxxxxxxxxx>

CPU hotplug operations take place in preemptible context. This leaves
the hotplugging thread at the mercy of overall system load and CPU
availability. If the hotplugging thread does not get an opportunity
to execute after it has already begun a hotplug operation, CPUs can
end up stuck in a quasi-online state. In the worst case, a CPU can be
stuck with its migration thread parked while another task runs on it
and changes its affinity in a loop. This combination can result in
unbounded execution time for the running task until the hotplugging
thread gets a chance to run and complete the hotplug operation.
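
For example, a workload of the kind described above can be
approximated by a task that bounces its own affinity between two CPUs
in a tight loop. The following userspace sketch is purely
illustrative and not part of this patch; the CPU numbers are
arbitrary:

#define _GNU_SOURCE
#include <sched.h>

int main(void)
{
	cpu_set_t set;

	for (;;) {
		/* Repeatedly rewrite this task's affinity mask. */
		CPU_ZERO(&set);
		CPU_SET(0, &set);
		sched_setaffinity(0, sizeof(set), &set);

		CPU_ZERO(&set);
		CPU_SET(1, &set);
		sched_setaffinity(0, sizeof(set), &set);
	}
}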

Fix this by ensuring that hotplug can only proceed from threads
belonging to the RT sched class. This gives the hotplugging thread
priority on the CPU regardless of the system load or the number of
available CPUs. If a SCHED_NORMAL task attempts to hotplug a CPU, we
temporarily elevate its scheduling policy to RT. Furthermore, we
disallow hotplug operations from starting if the calling task uses
the SCHED_IDLE, SCHED_DEADLINE, or SCHED_BATCH policies.

Signed-off-by: Syed Rameez Mustafa <rameezmustafa@xxxxxxxxxxxxxx>
Signed-off-by: Prasad Sodagudi <psodagud@xxxxxxxxxxxxxx>
---
kernel/cpu.c | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
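
For reviewers, the intended flow in cpu_up() after this patch is
roughly the following. This is only an illustrative condensation of
the hunks below: do_cpu_up_work() is a hypothetical stand-in for the
existing body of the function, and the intermediate error paths are
omitted.

static int cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err, switch_err;

	/*
	 * Reject SCHED_BATCH/SCHED_IDLE/SCHED_DEADLINE callers, elevate
	 * SCHED_NORMAL callers to SCHED_FIFO, and return 1 when the
	 * caller is already RT (so no restore is needed later).
	 */
	switch_err = switch_to_rt_policy();
	if (switch_err < 0)
		return switch_err;

	err = do_cpu_up_work(cpu, target);	/* existing hotplug path */

	/* Drop back to SCHED_NORMAL only if the policy was elevated here. */
	if (!switch_err)
		switch_to_fair_policy();

	return err;
}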

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 68b3740..aea4ce2 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -32,6 +32,7 @@
 #include <linux/relay.h>
 #include <linux/slab.h>
 #include <linux/percpu-rwsem.h>
+#include <uapi/linux/sched/types.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -1191,6 +1192,33 @@ void cpuhp_online_idle(enum cpuhp_state state)
 	complete_ap_thread(st, true);
 }
 
+static int switch_to_rt_policy(void)
+{
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+	unsigned int policy = current->policy;
+	int err;
+
+	/* Nobody should be attempting hotplug from these policy contexts. */
+	if (policy == SCHED_BATCH || policy == SCHED_IDLE ||
+	    policy == SCHED_DEADLINE)
+		return -EPERM;
+
+	if (policy == SCHED_FIFO || policy == SCHED_RR)
+		return 1;
+
+	/* Only SCHED_NORMAL left. */
+	err = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+	return err;
+
+}
+
+static int switch_to_fair_policy(void)
+{
+	struct sched_param param = { .sched_priority = 0 };
+
+	return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
+}
+
 /* Requires cpu_add_remove_lock to be held */
 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 {
@@ -1258,6 +1286,7 @@ static int cpu_up(unsigned int cpu, enum cpuhp_state target)
 static int cpu_up(unsigned int cpu, enum cpuhp_state target)
 {
 	int err = 0;
+	int switch_err = 0;
 
 	if (!cpu_possible(cpu)) {
 		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
@@ -1268,6 +1297,10 @@ static int cpu_up(unsigned int cpu, enum cpuhp_state target)
 		return -EINVAL;
 	}
 
+	switch_err = switch_to_rt_policy();
+	if (switch_err < 0)
+		return switch_err;
+
 	err = try_online_node(cpu_to_node(cpu));
 	if (err)
 		return err;
@@ -1286,6 +1319,14 @@ static int cpu_up(unsigned int cpu, enum cpuhp_state target)
 	err = _cpu_up(cpu, 0, target);
 out:
 	cpu_maps_update_done();
+
+	if (!switch_err) {
+		switch_err = switch_to_fair_policy();
+		if (switch_err)
+			pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
+			       switch_err, current->comm, current->pid);
+	}
+
 	return err;
 }

--
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project