[PATCH v2] sched/fair: simplify __calc_delta()

From: Dawei Li
Date: Thu Mar 14 2024 - 22:03:07 EST


Commit 5e963f2bd465 ("sched/fair: Commit to EEVDF") removed the only
use of __calc_delta() where the input weight is not NICE_0_LOAD. Since
weight is now always NICE_0_LOAD, it no longer needs to be an input
parameter; NICE_0_LOAD can be used directly inside __calc_delta().

Also, since weight is always NICE_0_LOAD, the initial fact value is
always scale_load_down(NICE_0_LOAD) = 2**10, so the first fact_hi is
always 0 and the first if block is dead code. Remove it.
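To see this concretely, here is a minimal userspace sketch (not part of
the patch; scale_load_down() is reduced to the CONFIG_64BIT shift and
ignores the kernel's low-weight clamping) showing that the upper 32
bits of the initial fact are zero:

#include <assert.h>
#include <stdint.h>

#define SCHED_FIXEDPOINT_SHIFT	10
/* CONFIG_64BIT: NICE_0_LOAD carries an extra fixed-point shift */
#define NICE_0_LOAD		(1024UL << SCHED_FIXEDPOINT_SHIFT)
/* simplified: the kernel version also clamps very small weights */
#define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)

int main(void)
{
	uint64_t fact = scale_load_down(NICE_0_LOAD);	/* 2**10 */
	uint32_t fact_hi = (uint32_t)(fact >> 32);

	assert(fact == 1024);
	assert(fact_hi == 0);	/* the removed if block could never run */
	return 0;
}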

The previous comment "(delta_exec * (weight * lw->inv_weight)) >>
WMULT_SHIFT" assumed that lw->weight * lw->inv_weight is always
(approximately) equal to 2**WMULT_SHIFT. However, when CONFIG_64BIT is
set, lw->weight carries an extra SCHED_FIXEDPOINT_SHIFT bits, so
lw->weight * lw->inv_weight is (approximately) equal to
2**WMULT_SHIFT * 2**10. What remains true both with and without
CONFIG_64BIT is: scale_load_down(lw->weight) * lw->inv_weight is
(approximately) equal to 2**WMULT_SHIFT. Fix the comment so that it is
correct in both configurations.
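As a sanity check, a userspace sketch (not part of the patch) of the
invariant the new comment states, using WMULT_CONST = ~0U and the
nice-0 values as in kernel/sched/fair.c:

#include <stdint.h>
#include <stdio.h>

#define WMULT_CONST	(~0U)	/* ~2**32, as in kernel/sched/fair.c */
#define WMULT_SHIFT	32

int main(void)
{
	/* nice 0 on 64-bit: lw->weight = 1024 << 10, scaled down to 1024 */
	uint64_t w = 1024;
	uint32_t inv_weight = WMULT_CONST / w;	/* __update_inv_weight() result */
	uint64_t prod = w * inv_weight;		/* ~2**WMULT_SHIFT */

	printf("w * inv_weight = %llu, 2**WMULT_SHIFT = %llu\n",
	       (unsigned long long)prod, 1ULL << WMULT_SHIFT);
	return 0;
}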

Update the comment for calc_delta_fair() to make it more precise.
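For example (userspace sketch, not part of the patch), a nice-5 task
has weight 335 (sched_prio_to_weight[25]), so its vruntime advances
about 1024/335 ~= 3.06 times faster than its wall-clock runtime:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t delta_exec = 1000000;	/* 1ms of runtime, in ns */
	uint64_t nice_0_weight = 1024;	/* unscaled NICE_0_LOAD */
	uint64_t weight = 335;		/* sched_prio_to_weight[25] (nice 5) */

	/* delta *= NICE_0_LOAD / se->load.weight, ignoring fixed-point details */
	printf("%llu ns\n",
	       (unsigned long long)(delta_exec * nice_0_weight / weight));
	return 0;
}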

Signed-off-by: Dawei Li <daweilics@xxxxxxxxx>
---
Changes in v2:
- update commit message
- reorder the variables
---
kernel/sched/fair.c | 29 ++++++++++-------------------
1 file changed, 10 insertions(+), 19 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a19ea290b790..e1869cf454ea 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -252,32 +252,23 @@ static void __update_inv_weight(struct load_weight *lw)
}

/*
- * delta_exec * weight / lw.weight
+ * delta_exec * NICE_0_LOAD / lw->weight
* OR
- * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
+ * (delta_exec * scale_load_down(NICE_0_LOAD) * lw->inv_weight) >> WMULT_SHIFT
*
- * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
- * we're guaranteed shift stays positive because inv_weight is guaranteed to
- * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
- *
- * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
- * weight/lw.weight <= 1, and therefore our shift will also be positive.
+ * We're guaranteed shift stays positive because inv_weight is guaranteed to
+ * fit 32 bits, and scale_load_down(NICE_0_LOAD) gives another 10 bits;
+ * therefore shift >= 22.
*/
-static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
+static u64 __calc_delta(u64 delta_exec, struct load_weight *lw)
 {
-	u64 fact = scale_load_down(weight);
-	u32 fact_hi = (u32)(fact >> 32);
+	u64 fact = scale_load_down(NICE_0_LOAD);
 	int shift = WMULT_SHIFT;
+	u32 fact_hi;
 	int fs;
 
 	__update_inv_weight(lw);
 
-	if (unlikely(fact_hi)) {
-		fs = fls(fact_hi);
-		shift -= fs;
-		fact >>= fs;
-	}
-
 	fact = mul_u32_u32(fact, lw->inv_weight);
 
 	fact_hi = (u32)(fact >> 32);
@@ -291,12 +282,12 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
}

/*
- * delta /= w
+ * delta *= NICE_0_LOAD / se->load.weight
*/
 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
 {
 	if (unlikely(se->load.weight != NICE_0_LOAD))
-		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
+		delta = __calc_delta(delta, &se->load);
 
 	return delta;
 }
--
2.40.1