[tip:perf/urgent] perf: Provide better condition for event rotation

From: tip-bot for Peter Zijlstra
Date: Wed Mar 10 2010 - 08:14:11 EST


Commit-ID: d4944a06666054707d23e11888e480af239e5abf
Gitweb: http://git.kernel.org/tip/d4944a06666054707d23e11888e480af239e5abf
Author: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
AuthorDate: Mon, 8 Mar 2010 13:51:20 +0100
Committer: Ingo Molnar <mingo@xxxxxxx>
CommitDate: Wed, 10 Mar 2010 13:22:36 +0100

perf: Provide better condition for event rotation

Avoid useless rotation and PMU disables: only rotate a context when it
has more events than can be active at once (nr_events != nr_active),
and only disable the PMU once rotation is actually due. Since the tick
no longer disables the PMU unconditionally, perf_ctx_adjust_freq() now
takes its own perf_disable()/perf_enable() pairs around the unthrottle,
read and period-adjust operations.

[ Could be improved by keeping a nr_runnable count to better account
  for counters in states below PERF_EVENT_STATE_INACTIVE ]
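
[ For illustration, a minimal sketch of what that nr_runnable idea
  might look like. Neither a nr_runnable field nor a
  ctx_needs_rotation() helper exists in this patch:

	/*
	 * Hypothetical: count events whose state is
	 * >= PERF_EVENT_STATE_INACTIVE, updated at enable/disable
	 * time, and rotate only when some runnable event is left
	 * off the PMU.
	 */
	static inline int ctx_needs_rotation(struct perf_event_context *ctx)
	{
		return ctx->nr_runnable > ctx->nr_active;
	}
]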

Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
Cc: Arnaldo Carvalho de Melo <acme@xxxxxxxxxxxxx>
Cc: paulus@xxxxxxxxx
Cc: eranian@xxxxxxxxxx
Cc: robert.richter@xxxxxxx
Cc: fweisbec@xxxxxxxxx
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@xxxxxxx>
---
kernel/perf_event.c | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index d810846..52c69a3 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1524,12 +1524,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 		 */
 		if (interrupts == MAX_INTERRUPTS) {
 			perf_log_throttle(event, 1);
+			perf_disable();
 			event->pmu->unthrottle(event);
+			perf_enable();
 		}
 
 		if (!event->attr.freq || !event->attr.sample_freq)
 			continue;
 
+		perf_disable();
 		event->pmu->read(event);
 		now = atomic64_read(&event->count);
 		delta = now - hwc->freq_count_stamp;
@@ -1537,6 +1540,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
 		if (delta > 0)
 			perf_adjust_period(event, TICK_NSEC, delta);
+		perf_enable();
 	}
 	raw_spin_unlock(&ctx->lock);
 }
@@ -1546,9 +1550,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-	if (!ctx->nr_events)
-		return;
-
 	raw_spin_lock(&ctx->lock);
 
 	/* Rotate the first entry last of non-pinned groups */
@@ -1561,19 +1562,28 @@ void perf_event_task_tick(struct task_struct *curr)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
+	int rotate = 0;
 
 	if (!atomic_read(&nr_events))
 		return;
 
 	cpuctx = &__get_cpu_var(perf_cpu_context);
-	ctx = curr->perf_event_ctxp;
+	if (cpuctx->ctx.nr_events &&
+	    cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+		rotate = 1;
 
-	perf_disable();
+	ctx = curr->perf_event_ctxp;
+	if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
+		rotate = 1;
 
 	perf_ctx_adjust_freq(&cpuctx->ctx);
 	if (ctx)
 		perf_ctx_adjust_freq(ctx);
 
+	if (!rotate)
+		return;
+
+	perf_disable();
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1585,7 +1595,6 @@ void perf_event_task_tick(struct task_struct *curr)
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-
 	perf_enable();
 }
 
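
For reference, the gate both tick paths now test can be written as one
predicate (needs_rotation() is an illustrative name here, not a helper
added by the patch):

	/* A context needs rotating only if not all its events fit on the PMU. */
	static inline int needs_rotation(struct perf_event_context *ctx)
	{
		return ctx->nr_events && ctx->nr_events != ctx->nr_active;
	}

Since nr_active is the number of events currently programmed on the
PMU, rotation and the perf_disable()/perf_enable() pair around it only
pay off when at least one event is waiting for a counter.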
