[PATCH] sched/deadline: add GRUB bw change tracepoints

From: Claudio Scordino
Date: Tue Jul 04 2017 - 08:31:41 EST


Add a tracepoint that reports the total and running bandwidth of a
deadline runqueue, i.e. the values used by the GRUB reclaiming
algorithm, whenever either of them changes.
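
Each event carries the dl_rq's total ("this") bandwidth, its currently
running bandwidth and the CPU id, so a record in the trace buffer has
the form below (field layout taken from the event's TP_printk; the
values are placeholders, not captured output):

  sched_dl_grub: total_bw=<this_bw> running_bw=<running_bw> cpu_id=<cpu>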

Signed-off-by: Claudio Scordino <claudio@xxxxxxxxxxxxxxx>
Signed-off-by: Juri Lelli <juri.lelli@xxxxxxx>
Signed-off-by: Luca Abeni <luca.abeni@xxxxxxxxxxxxxxx>
---
include/trace/events/sched.h | 32 ++++++++++++++++++++++++++++++++
kernel/sched/deadline.c      | 11 +++++++++++
2 files changed, 43 insertions(+)
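
Not part of the patch: a minimal userspace sketch of how the new event
could be enabled and read back through tracefs. It assumes tracefs is
mounted at /sys/kernel/tracing (older setups may have it under
/sys/kernel/debug/tracing), that the event lands in the "sched" group
as events/sched/sched_dl_grub, and that it runs as root:

#include <stdio.h>
#include <string.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	const char *enable =
		"/sys/kernel/tracing/events/sched/sched_dl_grub/enable";
	char line[512];
	FILE *tp;
	int n = 0;

	/* Turn the sched_dl_grub event on. */
	if (write_str(enable, "1"))
		return 1;

	/* trace_pipe blocks until events arrive; print the first few. */
	tp = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (tp) {
		while (n < 10 && fgets(line, sizeof(line), tp)) {
			if (strstr(line, "sched_dl_grub:")) {
				fputs(line, stdout);
				n++;
			}
		}
		fclose(tp);
	}

	/* Turn the event back off before exiting. */
	write_str(enable, "0");
	return tp ? 0 : 1;
}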

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index ae1409f..050fcb2 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -545,6 +545,38 @@ TRACE_EVENT(sched_swap_numa,
			__entry->dst_cpu, __entry->dst_nid)
);

+DECLARE_EVENT_CLASS(sched_dl_grub_template,
+
+	TP_PROTO(u64 this_bw, u64 running_bw, unsigned int cpu_id),
+
+	TP_ARGS(this_bw, running_bw, cpu_id),
+
+	TP_STRUCT__entry(
+		__field(u64, this_bw)
+		__field(u64, running_bw)
+		__field(u32, cpu_id)
+	),
+
+	TP_fast_assign(
+		__entry->this_bw = this_bw;
+		__entry->running_bw = running_bw;
+		__entry->cpu_id = cpu_id;
+	),
+
+	TP_printk("total_bw=%llu running_bw=%llu cpu_id=%lu",
+		  (unsigned long long)__entry->this_bw,
+		  (unsigned long long)__entry->running_bw,
+		  (unsigned long)__entry->cpu_id)
+);
+
+
+DEFINE_EVENT(sched_dl_grub_template, sched_dl_grub,
+
+	TP_PROTO(u64 this_bw, u64 running_bw, unsigned int cpu_id),
+
+	TP_ARGS(this_bw, running_bw, cpu_id)
+);
+
/*
* Tracepoint for waking a polling cpu without an IPI.
*/
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a84299f..ae5c7ef 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -17,6 +17,7 @@
#include "sched.h"

#include <linux/slab.h>
+#include <trace/events/sched.h>
#include <uapi/linux/sched/types.h>

struct dl_bandwidth def_dl_bandwidth;
@@ -85,6 +86,8 @@ void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
	dl_rq->running_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
+	trace_sched_dl_grub(dl_rq->this_bw, dl_rq->running_bw,
+			    rq_of_dl_rq(dl_rq)->cpu);
}

static inline
@@ -97,6 +100,8 @@ void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
	if (dl_rq->running_bw > old)
		dl_rq->running_bw = 0;
+	trace_sched_dl_grub(dl_rq->this_bw, dl_rq->running_bw,
+			    rq_of_dl_rq(dl_rq)->cpu);
}

static inline
@@ -107,6 +112,9 @@ void add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
+	trace_sched_dl_grub(dl_rq->this_bw, dl_rq->running_bw,
+			    rq_of_dl_rq(dl_rq)->cpu);
+
}

static inline
@@ -120,6 +128,9 @@ void sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
	if (dl_rq->this_bw > old)
		dl_rq->this_bw = 0;
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
+	trace_sched_dl_grub(dl_rq->this_bw, dl_rq->running_bw,
+			    rq_of_dl_rq(dl_rq)->cpu);
+
}

void dl_change_utilization(struct task_struct *p, u64 new_bw)
--
2.7.4