[PATCH 2/2] sched/fair: Clean up migrate_degrades_locality() to improve readability

From: Swapnil Sapkal
Date: Wed Jun 14 2023 - 06:26:16 EST


migrate_degrades_locality() returns a tri-state value indicating
whether migration improves locality, degrades locality, or has no
impact. Handle these return values with an enum to improve
readability.

Signed-off-by: Swapnil Sapkal <swapnil.sapkal@xxxxxxx>
---
kernel/sched/fair.c | 69 +++++++++++++++++++++++++++++----------------
1 file changed, 44 insertions(+), 25 deletions(-)
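
Note for reviewers (not part of the patch): below is a minimal,
self-contained sketch of the pattern applied here, i.e. returning an
enum instead of a raw -1/0/1 and handling it with a switch at the call
site. The names check_move(), MOVE_* and the node arguments are made up
for illustration and are not kernel identifiers.

#include <stdio.h>

/* Tri-state result, mirroring the -1/0/1 convention the patch replaces. */
enum move_impact {
	MOVE_NO_CHANGE = -1,	/* locality is not affected */
	MOVE_IMPROVES  =  0,	/* migration preferred */
	MOVE_DEGRADES  =  1	/* migration hurts locality */
};

static enum move_impact check_move(int src_node, int dst_node, int pref_node)
{
	if (src_node == dst_node)
		return MOVE_NO_CHANGE;
	if (dst_node == pref_node)
		return MOVE_IMPROVES;
	return MOVE_DEGRADES;
}

int main(void)
{
	/* The switch makes every possible outcome explicit at the call site. */
	switch (check_move(0, 1, 1)) {
	case MOVE_NO_CHANGE:
		printf("fall back to other heuristics\n");
		break;
	case MOVE_IMPROVES:
		printf("migrate\n");
		break;
	case MOVE_DEGRADES:
		printf("avoid migrating\n");
		break;
	}
	return 0;
}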

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9a8e5dcbe7e6..06813ce5356e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8443,45 +8443,52 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
return delta < (s64)sysctl_sched_migration_cost;
}

+enum migration_impact {
+ /* task migration is not affected by locality */
+ no_change = -1,
+
+ /* task migration improves locality, i.e. migration preferred */
+ improves_locality = 0,
+
+ /* task migration degrades locality */
+ degrades_locality = 1
+};
+
#ifdef CONFIG_NUMA_BALANCING
-/*
- * Returns 1, if task migration degrades locality
- * Returns 0, if task migration improves locality i.e migration preferred.
- * Returns -1, if task migration is not affected by locality.
- */
-static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
+static enum migration_impact
+migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
{
struct numa_group *numa_group = rcu_dereference(p->numa_group);
unsigned long src_weight, dst_weight;
int src_nid, dst_nid, dist;

if (!static_branch_likely(&sched_numa_balancing))
- return -1;
+ return no_change;

if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
- return -1;
+ return no_change;

src_nid = cpu_to_node(env->src_cpu);
dst_nid = cpu_to_node(env->dst_cpu);

if (src_nid == dst_nid)
- return -1;
+ return no_change;

/* Migrating away from the preferred node is always bad. */
if (src_nid == p->numa_preferred_nid) {
if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
- return 1;
+ return degrades_locality;
else
- return -1;
+ return no_change;
}

/* Encourage migration to the preferred node. */
if (dst_nid == p->numa_preferred_nid)
- return 0;
+ return improves_locality;

/* Leaving a core idle is often worse than degrading locality. */
if (env->idle == CPU_IDLE)
- return -1;
+ return no_change;

dist = node_distance(src_nid, dst_nid);
if (numa_group) {
@@ -8492,14 +8499,14 @@ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
dst_weight = task_weight(p, dst_nid, dist);
}

- return dst_weight < src_weight;
+ return (dst_weight < src_weight) ? degrades_locality : improves_locality;
}

#else
-static inline int migrate_degrades_locality(struct task_struct *p,
- struct lb_env *env)
+static inline enum migration_impact
+migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
{
- return -1;
+ return no_change;
}
#endif

@@ -8509,7 +8516,7 @@ static inline int migrate_degrades_locality(struct task_struct *p,
static
int can_migrate_task(struct task_struct *p, struct lb_env *env, int *tsk_cache_hot)
{
- int degrades_locality;
+ enum migration_impact migration_impact;

lockdep_assert_rq_held(env->src_rq);

@@ -8578,18 +8585,30 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env, int *tsk_cache_h
if (env->flags & LBF_ACTIVE_LB)
return 1;

- degrades_locality = migrate_degrades_locality(p, env);
- if (degrades_locality == -1)
+ migration_impact = migrate_degrades_locality(p, env);
+
+ switch (migration_impact) {
+ case no_change:
*tsk_cache_hot = task_hot(p, env);
- else
- *tsk_cache_hot = degrades_locality;
+ break;
+
+ case degrades_locality:
+ *tsk_cache_hot = 1;
+ break;
+
+ case improves_locality:
+ *tsk_cache_hot = 0;
+ break;
+ }
+
+ if (!(*tsk_cache_hot))
+ return 1;

/*
- * Can migrate a hot task only after the attempts to reach balance
+ * Can migrate a task only after the attempts to reach balance
* without the task have exceeded the cache_nice_tries threshold.
*/
- if (!(*tsk_cache_hot) ||
- env->sd->nr_balance_failed > env->sd->cache_nice_tries)
+ if (env->sd->nr_balance_failed > env->sd->cache_nice_tries)
return 1;

schedstat_inc(p->stats.nr_failed_migrations_hot);
--
2.34.1