[PATCH v3 3/5] sched/deadline: Remove unused variable extra_bw

From: Vineeth Pillai
Date: Sun May 14 2023 - 22:57:51 EST


extra_bw is no longer used by the GRUB reclaiming scheme, so remove the
field along with the __dl_update() machinery that existed only to
maintain it. __dl_sub() and __dl_add() now simply update total_bw and
no longer need their cpus argument.

Signed-off-by: Vineeth Pillai (Google) <vineeth@xxxxxxxxxxxxxxx>
---
 kernel/sched/deadline.c | 53 ++++++++++++-----------------------------
 kernel/sched/sched.h    |  1 -
 2 files changed, 15 insertions(+), 39 deletions(-)
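
For context (not part of the patch itself): after this change,
__dl_add()/__dl_sub() are plain accumulations on total_bw, the admitted
deadline bandwidth in BW_SHIFT (1 << 20) fixed point. Below is a
standalone userspace sketch of that arithmetic; to_ratio() is
re-derived from its kernel definition but simplified (no RUNTIME_INF
handling), and nothing here is the kernel code itself:

	#include <stdint.h>
	#include <stdio.h>
	#include <inttypes.h>

	#define BW_SHIFT	20
	#define BW_UNIT		(1 << BW_SHIFT)

	/* runtime/period scaled to BW_SHIFT fixed point, as kernel to_ratio() does */
	static uint64_t to_ratio(uint64_t period, uint64_t runtime)
	{
		return (runtime << BW_SHIFT) / period;
	}

	int main(void)
	{
		uint64_t total_bw = 0;

		/* admit a 10ms runtime / 100ms period task: bw ~= 0.1 * BW_UNIT */
		uint64_t bw = to_ratio(100 * 1000 * 1000ULL, 10 * 1000 * 1000ULL);

		total_bw += bw;		/* what __dl_add() now does */
		printf("total_bw = %" PRIu64 " (%.2f CPUs)\n", total_bw,
		       (double)total_bw / BW_UNIT);

		total_bw -= bw;		/* what __dl_sub() now does */
		printf("total_bw = %" PRIu64 "\n", total_bw);
		return 0;
	}

Running it prints "total_bw = 104857 (0.10 CPUs)" and then
"total_bw = 0", i.e. the 10% bandwidth is accounted on admission and
returned when the task leaves.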

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 85902c4c484b..67c1138df43a 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -163,20 +163,6 @@ static inline bool dl_bw_visited(int cpu, u64 gen)
 	return false;
 }
 
-static inline
-void __dl_update(struct dl_bw *dl_b, s64 bw)
-{
-	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
-	int i;
-
-	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
-			 "sched RCU must be held");
-	for_each_cpu_and(i, rd->span, cpu_active_mask) {
-		struct rq *rq = cpu_rq(i);
-
-		rq->dl.extra_bw += bw;
-	}
-}
 #else
 static inline struct dl_bw *dl_bw_of(int i)
 {
@@ -198,27 +184,18 @@ static inline bool dl_bw_visited(int cpu, u64 gen)
 	return false;
 }
 
-static inline
-void __dl_update(struct dl_bw *dl_b, s64 bw)
-{
-	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
-
-	dl->extra_bw += bw;
-}
 #endif
 
 static inline
-void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
+void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw)
 {
 	dl_b->total_bw -= tsk_bw;
-	__dl_update(dl_b, (s32)tsk_bw / cpus);
 }
 
 static inline
-void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
+void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
 {
 	dl_b->total_bw += tsk_bw;
-	__dl_update(dl_b, -((s32)tsk_bw / cpus));
 }
 
 static inline bool
@@ -430,7 +407,7 @@ static void task_non_contending(struct task_struct *p)
 			if (READ_ONCE(p->__state) == TASK_DEAD)
 				sub_rq_bw(&p->dl, &rq->dl);
 			raw_spin_lock(&dl_b->lock);
-			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+			__dl_sub(dl_b, p->dl.dl_bw);
 			raw_spin_unlock(&dl_b->lock);
 			__dl_clear_params(p);
 		}
@@ -721,12 +698,12 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
 	 */
 	dl_b = &rq->rd->dl_bw;
 	raw_spin_lock(&dl_b->lock);
-	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
+	__dl_sub(dl_b, p->dl.dl_bw);
 	raw_spin_unlock(&dl_b->lock);
 
 	dl_b = &later_rq->rd->dl_bw;
 	raw_spin_lock(&dl_b->lock);
-	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
+	__dl_add(dl_b, p->dl.dl_bw);
 	raw_spin_unlock(&dl_b->lock);
 
 	set_task_cpu(p, later_rq->cpu);
@@ -1425,7 +1402,7 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 		}
 
 		raw_spin_lock(&dl_b->lock);
-		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+		__dl_sub(dl_b, p->dl.dl_bw);
 		raw_spin_unlock(&dl_b->lock);
 		__dl_clear_params(p);
 
@@ -2506,7 +2483,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 		 * until we complete the update.
 		 */
 		raw_spin_lock(&src_dl_b->lock);
-		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+		__dl_sub(src_dl_b, p->dl.dl_bw);
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
@@ -2560,7 +2537,7 @@ void dl_add_task_root_domain(struct task_struct *p)
 	dl_b = &rq->rd->dl_bw;
 	raw_spin_lock(&dl_b->lock);
 
-	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
+	__dl_add(dl_b, p->dl.dl_bw);
 
 	raw_spin_unlock(&dl_b->lock);
 
@@ -2779,9 +2756,9 @@ int sched_dl_global_validate(void)
 static void init_dl_rq_bw(struct dl_rq *dl_rq)
 {
 	if (global_rt_runtime() == RUNTIME_INF)
-		dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT;
+		dl_rq->max_bw = 1 << BW_SHIFT;
 	else
-		dl_rq->max_bw = dl_rq->extra_bw = to_ratio(global_rt_period(),
+		dl_rq->max_bw = to_ratio(global_rt_period(),
 							   global_rt_runtime());
 }
 
@@ -2852,8 +2829,8 @@ int sched_dl_overflow(struct task_struct *p, int policy,
 	if (dl_policy(policy) && !task_has_dl_policy(p) &&
 	    !__dl_overflow(dl_b, cap, 0, new_bw)) {
 		if (hrtimer_active(&p->dl.inactive_timer))
-			__dl_sub(dl_b, p->dl.dl_bw, cpus);
-		__dl_add(dl_b, new_bw, cpus);
+			__dl_sub(dl_b, p->dl.dl_bw);
+		__dl_add(dl_b, new_bw);
 		err = 0;
 	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
 		   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
@@ -2864,8 +2841,8 @@ int sched_dl_overflow(struct task_struct *p, int policy,
 		 * But this would require to set the task's "inactive
 		 * timer" when the task is not inactive.
 		 */
-		__dl_sub(dl_b, p->dl.dl_bw, cpus);
-		__dl_add(dl_b, new_bw, cpus);
+		__dl_sub(dl_b, p->dl.dl_bw);
+		__dl_add(dl_b, new_bw);
 		dl_change_utilization(p, new_bw);
 		err = 0;
 	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
@@ -3044,7 +3021,7 @@ int dl_cpu_busy(int cpu, struct task_struct *p)
 		 * We will free resources in the source root_domain
 		 * later on (see set_cpus_allowed_dl()).
 		 */
-		__dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
+		__dl_add(dl_b, p->dl.dl_bw);
 	}
 
 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1bc7ae9ad349..33db99756624 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -751,7 +751,6 @@ struct dl_rq {
 	 * runqueue (inactive utilization = this_bw - running_bw).
 	 */
 	u64			this_bw;
-	u64			extra_bw;
 
 	/*
 	 * Maximum available bandwidth for deadline tasks of this rq. This is
--
2.40.1