[PATCH 21/30] sched: schedule eagerly in resched_cpu()

From: Ankur Arora
Date: Tue Feb 13 2024 - 01:03:44 EST


resched_cpu() is used as an RCU hammer of last resort, so it needs to
cause an immediate reschedule. Under PREEMPT_AUTO, however, the default
policy is to let the running task finish its time quantum and only mark
it for rescheduling at the next exit to user. Add RESCHED_FORCE so that
resched_cpu() can force eager rescheduling with tif_resched(NR_now).

Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Juri Lelli <juri.lelli@xxxxxxxxxx>
Cc: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Cc: Paul E. McKenney <paulmck@xxxxxxxxxx>
Signed-off-by: Ankur Arora <ankur.a.arora@xxxxxxxxxx>
---
kernel/sched/core.c | 15 +++++++++++----
kernel/sched/sched.h | 1 +
2 files changed, 12 insertions(+), 4 deletions(-)

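For reviewers, a minimal userspace sketch of the resulting policy
(illustrative only, not kernel code: the NR_lazy name is taken from the
wider series rather than this hunk, and the bool parameters stand in
for IS_ENABLED(CONFIG_PREEMPT_AUTO) and is_idle_task(curr)):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's resched_t; NR_lazy assumed from the series. */
typedef enum { NR_now, NR_lazy } resched_t;

enum resched_opt {
        RESCHED_DEFAULT,
        RESCHED_FORCE,
};

/*
 * Userspace model of resched_opt_translate() after this patch; the two
 * bools replace IS_ENABLED(CONFIG_PREEMPT_AUTO) and is_idle_task(curr).
 */
static resched_t resched_opt_translate(bool preempt_auto, bool curr_is_idle,
                                       enum resched_opt opt)
{
        if (!preempt_auto)
                return NR_now;          /* other preemption models: always eager */

        if (opt == RESCHED_FORCE)
                return NR_now;          /* resched_cpu(): RCU hammer, reschedule now */

        if (curr_is_idle)
                return NR_now;          /* idle task: reschedule eagerly */

        return NR_lazy;                 /* default: defer to next exit to user */
}

int main(void)
{
        printf("PREEMPT_AUTO, RESCHED_FORCE:   %s\n",
               resched_opt_translate(true, false, RESCHED_FORCE) == NR_now ?
               "NR_now" : "NR_lazy");
        printf("PREEMPT_AUTO, RESCHED_DEFAULT: %s\n",
               resched_opt_translate(true, false, RESCHED_DEFAULT) == NR_now ?
               "NR_now" : "NR_lazy");
        return 0;
}

In other words, with this patch only RESCHED_FORCE and the idle task
translate to NR_now under PREEMPT_AUTO; everything else stays lazy.
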
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5e3dd95efb73..de963e8e2bee 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1035,9 +1035,9 @@ void wake_up_q(struct wake_q_head *head)
* For preemption models other than PREEMPT_AUTO: always schedule
* eagerly.
*
- * For PREEMPT_AUTO: allow everything, whether running in user or
- * kernel context, to finish its time quanta, and mark for
- * rescheduling at the next exit to user.
+ * For PREEMPT_AUTO: schedule idle threads eagerly, allow everything
+ * else, whether running in user or kernel context, to finish its time
+ * quanta, and mark for rescheduling at the next exit to user.
*/
static resched_t resched_opt_translate(struct task_struct *curr,
enum resched_opt opt)
@@ -1045,6 +1045,9 @@ static resched_t resched_opt_translate(struct task_struct *curr,
if (!IS_ENABLED(CONFIG_PREEMPT_AUTO))
return NR_now;

+ if (opt == RESCHED_FORCE)
+ return NR_now;
+
if (is_idle_task(curr))
return NR_now;

@@ -1106,7 +1109,11 @@ void resched_cpu(int cpu)

raw_spin_rq_lock_irqsave(rq, flags);
if (cpu_online(cpu) || cpu == smp_processor_id())
- resched_curr(rq);
+ /*
+ * resched_cpu() is typically used as an RCU hammer.
+ * Mark for imminent resched.
+ */
+ __resched_curr(rq, RESCHED_FORCE);
raw_spin_rq_unlock_irqrestore(rq, flags);
}

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c3ae70ad23ec..3600a8673d08 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2464,6 +2464,7 @@ extern void reweight_task(struct task_struct *p, int prio);

enum resched_opt {
RESCHED_DEFAULT,
+ RESCHED_FORCE,
};

extern void __resched_curr(struct rq *rq, enum resched_opt opt);
--
2.31.1