[PATCH 18/30] sched: hierarchical load vs affine wakeups

From: Peter Zijlstra
Date: Fri Jun 27 2008 - 08:04:49 EST


With hierarchical grouping a task's se.load.weight is relative to its own group
runqueue, not to the rq as a whole, so we can't just compare task weight to rq
weight in wake_affine() - the task weight first has to be scaled by its group's
hierarchical load to get its effective contribution to the rq.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
---
kernel/sched_fair.c | 23 +++++++++++++++++++++--
1 file changed, 21 insertions(+), 2 deletions(-)

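As a side note for intuition (not part of the patch): the scaling reduces to
weight-within-group times the group's share of the rq. A minimal userspace
sketch, with made-up names and a plain integer divide standing in for the
fixed-point rounding that calc_delta_mine() does:

	/*
	 * Illustrative only: a task's load.weight is relative to its own
	 * group runqueue, so to compare it against a whole-rq load we
	 * scale it by the group's hierarchical contribution to that rq.
	 */
	static unsigned long h_load_approx(unsigned long task_weight,
					   unsigned long group_h_load,
					   unsigned long group_rq_weight)
	{
		/* weight within the group, times the group's share of the rq */
		return task_weight * group_h_load / group_rq_weight;
	}

E.g. a nice-0 task (weight 1024) in a group contributing an h_load of 512 to a
cfs_rq of total weight 2048 adds an effective 1024 * 512 / 2048 = 256 to the
rq, not the full 1024.
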
Index: linux-2.6/kernel/sched_fair.c
===================================================================
--- linux-2.6.orig/kernel/sched_fair.c
+++ linux-2.6/kernel/sched_fair.c
@@ -1071,6 +1071,25 @@ static inline int wake_idle(int cpu, str
 
 static const struct sched_class fair_sched_class;
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static unsigned long task_h_load(struct task_struct *p)
+{
+	unsigned long h_load = p->se.load.weight;
+	struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
+
+	update_h_load(task_cpu(p));
+
+	h_load = calc_delta_mine(h_load, cfs_rq->h_load, &cfs_rq->load);
+
+	return h_load;
+}
+#else
+static unsigned long task_h_load(struct task_struct *p)
+{
+	return p->se.load.weight;
+}
+#endif
+
 static int
 wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	    struct task_struct *p, int prev_cpu, int this_cpu, int sync,
@@ -1091,9 +1110,9 @@ wake_affine(struct rq *rq, struct sched_
 	 * of the current CPU:
 	 */
 	if (sync)
-		tl -= current->se.load.weight;
+		tl -= task_h_load(current);
 
-	balanced = 100*(tl + p->se.load.weight) <= imbalance*load;
+	balanced = 100*(tl + task_h_load(p)) <= imbalance*load;
 
 	/*
 	 * If the currently running task will sleep within
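
Again illustrative, not part of the patch: with the scaled weight, the affine
balance test above amounts to the following, where the imbalance factor comes
from the scheduling domain's imbalance_pct:

	/*
	 * Pulling p to this CPU is considered balanced when this CPU's
	 * load plus p's *scaled* weight stays within the permitted
	 * imbalance percentage of the previous CPU's load.
	 */
	static int balanced_approx(unsigned long tl, unsigned long p_h_load,
				   unsigned int imbalance, unsigned long load)
	{
		return 100UL * (tl + p_h_load) <= imbalance * load;
	}

Using the earlier numbers, an unscaled weight of 1024 can fail this test in
situations where the task's real 256 contribution passes, needlessly rejecting
affine wakeups for grouped tasks.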

--
