Re: [PATCH v6 3/3] sched/fair: Combine EAS check with overutilized access

From: Vincent Guittot
Date: Thu Mar 07 2024 - 11:51:31 EST


On Thu, 7 Mar 2024 at 09:58, Shrikanth Hegde <sshegde@xxxxxxxxxxxxx> wrote:
>
> Access to the overutilized field is always paired with a
> sched_energy_enabled() check, in the pattern:
>
> if (sched_energy_enabled() && !overutilized)
>         do something
>
> So fold the check into the helper and have it return this combined
> condition. The resulting code reads more naturally: do something when
> the root domain is not overutilized.
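>
> With the reworked helper, such call sites reduce to the following (a
> sketch of the resulting pattern, where rd stands for the root domain
> being checked):
>
> if (is_rd_not_overutilized(rd))
>         do something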
>
> No change in functionality intended.
>
> Suggested-by: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
> Signed-off-by: Shrikanth Hegde <sshegde@xxxxxxxxxxxxx>

Reviewed-by: Vincent Guittot <vincent.guittot@xxxxxxxxxx>


> ---
> kernel/sched/fair.c | 24 +++++++++---------------
> 1 file changed, 9 insertions(+), 15 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 87e08a252f94..bcda18a2ccfe 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -6676,12 +6676,11 @@ static inline bool cpu_overutilized(int cpu)
> }
>
> /*
> - * Ensure that caller can do EAS. overutilized value
> - * make sense only if EAS is enabled
> + * overutilized value makes sense only if EAS is enabled
> */
> -static inline int is_rd_overutilized(struct root_domain *rd)
> +static inline int is_rd_not_overutilized(struct root_domain *rd)
> {
> - return READ_ONCE(rd->overutilized);
> + return sched_energy_enabled() && !READ_ONCE(rd->overutilized);
> }
>
> static inline void set_rd_overutilized_status(struct root_domain *rd,
> @@ -6700,10 +6699,8 @@ static inline void check_update_overutilized_status(struct rq *rq)
> * overutilized field is used for load balancing decisions only
> * if energy aware scheduler is being used
> */
> - if (!sched_energy_enabled())
> - return;
>
> - if (!is_rd_overutilized(rq->rd) && cpu_overutilized(rq->cpu))
> + if (is_rd_not_overutilized(rq->rd) && cpu_overutilized(rq->cpu))
> set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
> }
> #else
> @@ -7989,7 +7986,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
>
> rcu_read_lock();
> pd = rcu_dereference(rd->pd);
> - if (!pd || is_rd_overutilized(rd))
> + if (!pd)
> goto unlock;
>
> /*
> @@ -8192,7 +8189,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
> cpumask_test_cpu(cpu, p->cpus_ptr))
> return cpu;
>
> - if (sched_energy_enabled()) {
> + if (is_rd_not_overutilized(this_rq()->rd)) {
> new_cpu = find_energy_efficient_cpu(p, prev_cpu);
> if (new_cpu >= 0)
> return new_cpu;
> @@ -10869,12 +10866,9 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
> if (busiest->group_type == group_misfit_task)
> goto force_balance;
>
> - if (sched_energy_enabled()) {
> - struct root_domain *rd = env->dst_rq->rd;
> -
> - if (rcu_dereference(rd->pd) && !is_rd_overutilized(rd))
> - goto out_balanced;
> - }
> + if (is_rd_not_overutilized(env->dst_rq->rd) &&
> + rcu_dereference(env->dst_rq->rd->pd))
> + goto out_balanced;
>
> /* ASYM feature bypasses nice load balance check */
> if (busiest->group_type == group_asym_packing)
> --
> 2.39.3
>