[PATCH] sched/fair: Update freq on tick, context switch, and blocked avgs update

From: Qais Yousef
Date: Mon Dec 25 2023 - 20:23:57 EST


Signed-off-by: Qais Yousef (Google) <qyousef@xxxxxxxxxxx>
---
kernel/sched/cpufreq_schedutil.c | 3 ---
kernel/sched/fair.c | 13 +------------
kernel/sched/sched.h | 15 +--------------
3 files changed, 2 insertions(+), 29 deletions(-)

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index c0879a985097..553a3d7f02d8 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -166,9 +166,6 @@ static inline bool ignore_short_tasks(int cpu,
struct task_struct *p = cpu_rq(cpu)->curr;
unsigned long task_util;

- if (!(flags & SCHED_CPUFREQ_PERF_HINTS))
- return false;
-
if (!fair_policy(p->policy))
return false;

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d63eae534cec..3a30f78b37d3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5717,8 +5717,6 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
sub_nr_running(rq, task_delta);

done:
- cpufreq_update_util(rq, 0);
-
/*
* Note: distribution will already see us throttled via the
* throttled-list. rq->lock protects completion.
@@ -5811,8 +5809,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
unthrottle_throttle:
assert_list_leaf_cfs_rq(rq);

- cpufreq_update_util(rq, 0);
-
/* Determine whether we need to wake up potentially idle CPU: */
if (rq->curr == rq->idle && rq->cfs.nr_running)
resched_curr(rq);
@@ -6675,8 +6671,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
enqueue_throttle:
assert_list_leaf_cfs_rq(rq);

- cpufreq_update_util(rq, p->in_iowait ? SCHED_CPUFREQ_IOWAIT : 0);
-
hrtick_update(rq);
}

@@ -6754,7 +6748,6 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)

dequeue_throttle:
util_est_update(&rq->cfs, p, task_sleep);
- cpufreq_update_util(rq, 0);
hrtick_update(rq);
}

@@ -8338,7 +8331,6 @@ done: __maybe_unused;

update_misfit_status(p, rq);
sched_fair_update_stop_tick(rq, p);
- cpufreq_update_util(rq, 0);

return p;

@@ -12460,7 +12452,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)

update_misfit_status(curr, rq);
update_overutilized_status(task_rq(curr));
- cpufreq_update_util(rq, SCHED_CPUFREQ_PERF_HINTS);
+ cpufreq_update_util(rq, current->in_iowait ? SCHED_CPUFREQ_IOWAIT : 0);

task_tick_core(rq, curr);
}
@@ -12585,7 +12577,6 @@ static void detach_task_cfs_rq(struct task_struct *p)
struct sched_entity *se = &p->se;

detach_entity_cfs_rq(se);
- cpufreq_update_util(task_rq(p), 0);
}

static void attach_task_cfs_rq(struct task_struct *p)
@@ -12593,7 +12584,6 @@ static void attach_task_cfs_rq(struct task_struct *p)
struct sched_entity *se = &p->se;

attach_entity_cfs_rq(se);
- cpufreq_update_util(task_rq(p), 0);
}

static void switched_from_fair(struct rq *rq, struct task_struct *p)
@@ -12839,7 +12829,6 @@ static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
update_cfs_group(se);
}
- cpufreq_update_util(rq, 0);
rq_unlock_irqrestore(rq, &rf);
}

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 516187ea2b81..e1622e2b82be 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3076,20 +3076,7 @@ static inline bool uclamp_rq_is_capped(struct rq *rq)
/* Request freq update on context switch if necessary */
static inline void uclamp_context_switch(struct rq *rq)
{
- unsigned long uclamp_min;
- unsigned long uclamp_max;
- unsigned long util;
-
- /* Only RT and FAIR tasks are aware of uclamp */
- if (!rt_policy(current->policy) && !fair_policy(current->policy))
- return;
-
- uclamp_min = uclamp_eff_value(current, UCLAMP_MIN);
- uclamp_max = uclamp_eff_value(current, UCLAMP_MAX);
- util = rq->cfs.avg.util_avg;
-
- if (uclamp_min > util || uclamp_max < util)
- cpufreq_update_util(rq, SCHED_CPUFREQ_PERF_HINTS);
+ cpufreq_update_util(rq, current->in_iowait ? SCHED_CPUFREQ_IOWAIT : 0);
}
#else /* CONFIG_UCLAMP_TASK */
static inline unsigned long uclamp_eff_value(struct task_struct *p,
--
2.40.1