[PATCH] fixup! sched/deadline: add per rq tracking of admitted bandwidth

From: Juri Lelli
Date: Thu Feb 11 2016 - 11:55:49 EST


Signed-off-by: Juri Lelli <juri.lelli@xxxxxxx>
---
include/linux/init_task.h | 1 +
include/linux/sched.h | 1 +
kernel/sched/core.c | 5 ++++-
kernel/sched/deadline.c | 26 +++++++++++++++++++++++++-
4 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index f2cb8d4..c582f9d 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -199,6 +199,7 @@ extern struct task_group root_task_group;
.policy = SCHED_NORMAL, \
.cpus_allowed = CPU_MASK_ALL, \
.nr_cpus_allowed= NR_CPUS, \
+ .fallback_cpu = -1, \
.mm = NULL, \
.active_mm = &init_mm, \
.restart_block = { \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a10494a..a6fc95c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1401,6 +1401,7 @@ struct task_struct {
struct task_struct *last_wakee;

int wake_cpu;
+ int fallback_cpu;
#endif
int on_rq;

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7fb9246..4e4bc41 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1442,7 +1442,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
continue;
if (!cpu_active(dest_cpu))
continue;
-if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) {
+p->fallback_cpu = dest_cpu;
return dest_cpu;
+}
}
@@ -1490,6 +1491,7 @@ out:
}
}

+ p->fallback_cpu = dest_cpu;
return dest_cpu;
}

@@ -1954,6 +1956,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
if (task_cpu(p) != cpu) {
wake_flags |= WF_MIGRATED;
set_task_cpu(p, cpu);
+ p->fallback_cpu = -1;
}
#endif /* CONFIG_SMP */

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 6368f43..1eccecf 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1043,6 +1043,21 @@ static void yield_task_dl(struct rq *rq)

#ifdef CONFIG_SMP

+/*
+ * swap_task_ac_bw - move @p's admitted deadline bandwidth between rqs.
+ *
+ * Subtracts @p->dl.dl_bw from @from's admitted-bandwidth total and adds
+ * it to @to's, under both rq locks with IRQs disabled so the per-rq
+ * totals stay consistent. Caller must hold @p->pi_lock (asserted below).
+ *
+ * NOTE(review): double_rq_lock() presumes neither rq->lock is already
+ * held on entry -- confirm that holds for every caller (the
+ * select_task_rq_dl() path and the migrate_task_rq hook).
+ */
+static void swap_task_ac_bw(struct task_struct *p,
+ struct rq *from,
+ struct rq *to)
+{
+ unsigned long flags;
+
+ lockdep_assert_held(&p->pi_lock);
+ local_irq_save(flags);
+ double_rq_lock(from, to);
+ __dl_sub_ac(from, p->dl.dl_bw);
+ __dl_add_ac(to, p->dl.dl_bw);
+ double_rq_unlock(from, to);
+ local_irq_restore(flags);
+}
+
static int find_later_rq(struct task_struct *task);

static int
@@ -1077,8 +1092,10 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
if (target != -1 &&
(dl_time_before(p->dl.deadline,
cpu_rq(target)->dl.earliest_dl.curr) ||
- (cpu_rq(target)->dl.dl_nr_running == 0)))
+ (cpu_rq(target)->dl.dl_nr_running == 0))) {
cpu = target;
+ swap_task_ac_bw(p, rq, cpu_rq(target));
+ }
}
rcu_read_unlock();

@@ -1807,6 +1824,12 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
switched_to_dl(rq, p);
}

+/*
+ * migrate_task_rq hook: @p is being moved off task_rq(p). If it was
+ * placed via the fallback path (p->fallback_cpu != -1, set in
+ * select_fallback_rq()), transfer its admitted bandwidth to the
+ * fallback CPU's rq. Ordinary wakeups keep fallback_cpu == -1 (reset
+ * in try_to_wake_up()) and do their swap in select_task_rq_dl().
+ */
+static void migrate_task_rq_dl(struct task_struct *p)
+{
+ if (p->fallback_cpu != -1)
+ swap_task_ac_bw(p, task_rq(p), cpu_rq(p->fallback_cpu));
+}
+
const struct sched_class dl_sched_class = {
.next = &rt_sched_class,
.enqueue_task = enqueue_task_dl,
@@ -1820,6 +1843,7 @@ const struct sched_class dl_sched_class = {

#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_dl,
+ .migrate_task_rq = migrate_task_rq_dl,
.set_cpus_allowed = set_cpus_allowed_dl,
.rq_online = rq_online_dl,
.rq_offline = rq_offline_dl,
--
2.7.0