[PATCH V3 6/6] perf/x86: calculate sampling overhead

From: kan . liang
Date: Thu Dec 08 2016 - 16:28:31 EST


From: Kan Liang <kan.liang@xxxxxxxxx>

On x86, the NMI handler is the main contributor to sampling
overhead. Add a PMU-specific overhead type, PERF_PMU_SAMPLE_OVERHEAD,
to account for it.

Other architectures that may not have an NMI can reuse this overhead
type for their sampling interrupt handlers, as sketched below.
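
For illustration, a non-NMI architecture could account the same
overhead type from its sampling interrupt handler, roughly as in the
minimal sketch below. The handler and its arch_pmu_handle_irq()
helper are hypothetical names, and the sketch assumes the
architecture reaches its per-CPU perf_cpu_context the same way the
x86 code does.

/*
 * Hypothetical sampling interrupt handler for a non-NMI
 * architecture; mirrors the accounting in the x86 NMI path.
 */
static int arch_perf_sample_irq_handler(struct pt_regs *regs)
{
	struct perf_cpu_context *cpuctx =
			this_cpu_ptr(pmu.pmu_cpu_context);
	u64 start_clock, clock;
	int ret;

	start_clock = sched_clock();
	ret = arch_pmu_handle_irq(regs);	/* hypothetical helper */
	clock = sched_clock() - start_clock;

	perf_sample_event_took(clock);

	/* account sampling overhead in the per-CPU context */
	cpuctx->overhead[PERF_PMU_SAMPLE_OVERHEAD].nr++;
	cpuctx->overhead[PERF_PMU_SAMPLE_OVERHEAD].time += clock;

	return ret;
}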

Signed-off-by: Kan Liang <kan.liang@xxxxxxxxx>
---
 arch/x86/events/core.c          | 8 +++++++-
 include/uapi/linux/perf_event.h | 1 +
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 09ab36a..1e57ccf 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1478,8 +1478,10 @@ void perf_events_lapic_init(void)
 static int
 perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 {
+	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu.pmu_cpu_context);
 	u64 start_clock;
 	u64 finish_clock;
+	u64 clock;
 	int ret;
 
 	/*
@@ -1492,8 +1494,12 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 	start_clock = sched_clock();
 	ret = x86_pmu.handle_irq(regs);
 	finish_clock = sched_clock();
+	clock = finish_clock - start_clock;
+	perf_sample_event_took(clock);
 
-	perf_sample_event_took(finish_clock - start_clock);
+	/* calculate NMI overhead */
+	cpuctx->overhead[PERF_PMU_SAMPLE_OVERHEAD].nr++;
+	cpuctx->overhead[PERF_PMU_SAMPLE_OVERHEAD].time += clock;
 
 	return ret;
 }
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 7ba6d30..954b116 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -1004,6 +1004,7 @@ enum perf_record_overhead_type {
 	PERF_CORE_MUX_OVERHEAD = 0,
 	PERF_CORE_SB_OVERHEAD,
 	/* PMU specific */
+	PERF_PMU_SAMPLE_OVERHEAD,
 	PERF_OVERHEAD_MAX,
 };

--
2.4.3