Re: [PATCH 1/2] sched/fair: skip newidle update stats

From: Peter Zijlstra
Date: Fri Nov 12 2021 - 09:55:28 EST

Subject: sched/fair: Simplify newidle_balance()
From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Date: Fri Nov 12 15:46:26 CET 2021

Move rq_{un,re}pin_lock() next to raw_spin_rq_{un,}lock().

Remove all rcu_read_{,un}lock(); we have preempt/irqs disabled over the
whole function and, since the RCU flavour consolidation, those (again)
hold off RCU.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
kernel/sched/fair.c | 23 ++++++++---------------
1 file changed, 8 insertions(+), 15 deletions(-)
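
Not part of the patch, just for reference: a rough sketch of the resulting
lock/pin ordering in newidle_balance(), assuming (as on this path) that
preemption and IRQs stay disabled across the whole function, which is what
lets the explicit rcu_read_lock()/unlock() pairs go away:

	/* preempt/irqs off: already an RCU read-side section */
	sd = rcu_dereference_check_sched_domain(this_rq->sd);
	...
	rq_unpin_lock(this_rq, rf);	/* unpin right before dropping the rq lock */
	raw_spin_rq_unlock(this_rq);

	update_blocked_averages(this_cpu);
	for_each_domain(this_cpu, sd) {	/* still covered: preempt/irqs off */
		...
	}

	raw_spin_rq_lock(this_rq);
	rq_repin_lock(this_rq, rf);	/* repin right after retaking the rq lock */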

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10884,15 +10884,6 @@ static int newidle_balance(struct rq *th
if (!cpu_active(this_cpu))
return 0;

- /*
- * This is OK, because current is on_cpu, which avoids it being picked
- * for load-balance and preemption/IRQs are still disabled avoiding
- * further scheduler activity on it and we're being very careful to
- * re-start the picking loop.
- */
- rq_unpin_lock(this_rq, rf);
-
- rcu_read_lock();
sd = rcu_dereference_check_sched_domain(this_rq->sd);

if (!READ_ONCE(this_rq->rd->overload) ||
@@ -10908,18 +10899,22 @@ static int newidle_balance(struct rq *th
*/
early_stop = 1;
}
- rcu_read_unlock();

goto out;
}
- rcu_read_unlock();

+ /*
+ * This is OK, because current is on_cpu, which avoids it being picked
+ * for load-balance and preemption/IRQs are still disabled avoiding
+ * further scheduler activity on it and we're being very careful to
+ * re-start the picking loop.
+ */
+ rq_unpin_lock(this_rq, rf);
raw_spin_rq_unlock(this_rq);

t0 = sched_clock_cpu(this_cpu);
update_blocked_averages(this_cpu);

- rcu_read_lock();
for_each_domain(this_cpu, sd) {
int continue_balancing = 1;
u64 domain_cost;
@@ -10953,9 +10948,9 @@ static int newidle_balance(struct rq *th
this_rq->ttwu_pending)
break;
}
- rcu_read_unlock();

raw_spin_rq_lock(this_rq);
+ rq_repin_lock(this_rq, rf);

if (curr_cost > this_rq->max_idle_balance_cost)
this_rq->max_idle_balance_cost = curr_cost;
@@ -10982,8 +10977,6 @@ static int newidle_balance(struct rq *th
else if (!early_stop)
nohz_newidle_balance(this_rq);

- rq_repin_lock(this_rq, rf);
-
return pulled_task;
}