[RFC PATCH V2 06/19] sched: log the cpu utilization at rq

From: Preeti U Murthy
Date: Mon Aug 11 2014 - 07:35:52 EST


From: Alex Shi <alex.shi@xxxxxxxxx>

A cpu's utilization measures how busy that cpu is:

	util = cpu_rq(cpu)->avg.runnable_avg_sum * SCHED_CAPACITY_SCALE
		/ cpu_rq(cpu)->avg.runnable_avg_period;

Since util can never exceed 1, we scale it by 1024 (SCHED_CAPACITY_SCALE)
and define FULL_UTIL as 1024.
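
For illustration, here is the same fixed-point arithmetic as a standalone
userspace sketch (not part of the patch; the sum/period sample values are
made up). A cpu that was runnable for about half of the decayed period
ends up with a util of roughly 512 out of 1024:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1 << SCHED_CAPACITY_SHIFT)	/* 1024 */

int main(void)
{
	/* made-up sample values standing in for rq->avg */
	unsigned long long runnable_avg_sum = 23690;
	unsigned long long runnable_avg_period = 47376;

	/* util = sum * 1024 / period; 1024 (FULL_UTIL) means fully busy */
	unsigned int util = (runnable_avg_sum << SCHED_CAPACITY_SHIFT)
				/ runnable_avg_period;

	printf("util = %u / %d\n", util, SCHED_CAPACITY_SCALE);	/* 512 / 1024 */
	return 0;
}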

Later power-aware scheduling needs to be sensitive to how busy a cpu is,
since power consumption is closely tied to cpu busy time.

Note that rq->util can be used for any purpose if needed, not only power
scheduling.
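
For example, a hypothetical consumer outside power scheduling (the helper
name and the 7/8 threshold are invented here, not part of this series)
could flag a nearly saturated cpu:

#ifdef CONFIG_SCHED_POWER
/*
 * Hypothetical sketch, not in this series: treat a cpu as nearly
 * saturated once it is busy more than 7/8 of the time.
 */
static inline bool rq_nearly_full(struct rq *rq)
{
	return rq->util > FULL_UTIL * 7 / 8;	/* > 896 of 1024 */
}
#endif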

Signed-off-by: Alex Shi <alex.shi@xxxxxxxxx>
[Added CONFIG_SCHED_POWER switch to enable this patch]
Signed-off-by: Preeti U Murthy <preeti@xxxxxxxxxxxxxxxxxx>
---

kernel/sched/debug.c | 3 +++
kernel/sched/fair.c | 15 +++++++++++++++
kernel/sched/sched.h | 9 +++++++++
3 files changed, 27 insertions(+)

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 627b3c3..395af7f 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -325,6 +325,9 @@ do { \
 
 	P(ttwu_count);
 	P(ttwu_local);
+#ifdef CONFIG_SCHED_POWER
+	P(util);
+#endif
 
 #undef P
 #undef P64
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f1b0a33..681ad06 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2446,10 +2446,25 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
 	}
 }
 
+#ifdef CONFIG_SCHED_POWER
+static void update_rq_util(struct rq *rq)
+{
+	if (rq->avg.runnable_avg_period)
+		rq->util = (u64)(rq->avg.runnable_avg_sum << SCHED_CAPACITY_SHIFT)
+				/ rq->avg.runnable_avg_period;
+	else
+		rq->util = (u64)(rq->avg.runnable_avg_sum << SCHED_CAPACITY_SHIFT);
+}
+#else
+static void update_rq_util(struct rq *rq) {}
+#endif
+
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
 {
 	__update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
 	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
+
+	update_rq_util(rq);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 95fc013..971b812 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -508,6 +508,11 @@ extern struct root_domain def_root_domain;
 
 #endif /* CONFIG_SMP */
 
+/* full cpu utilization */
+#ifdef CONFIG_SCHED_POWER
+#define FULL_UTIL	SCHED_CAPACITY_SCALE
+#endif
+
 /*
  * This is the main, per-CPU runqueue data structure.
  *
@@ -556,6 +561,10 @@ struct rq {
 	struct sched_avg avg;
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
+#ifdef CONFIG_SCHED_POWER
+	unsigned int util;
+#endif /* CONFIG_SCHED_POWER */
+
 	/*
 	 * This is part of a global counter where only the total sum
 	 * over all CPUs matters. A task can increase this counter on
