[PATCH 3/3][RFC] tracing: Add config to permanently have trace_printk() use percpu

From: Steven Rostedt
Date: Sat Oct 08 2011 - 13:13:43 EST


From: Steven Rostedt <srostedt@xxxxxxxxxx>

For those that do a lot of developing and use trace_printk() often,
there is no need for a switch to change between the per_cpu buffers
and a single lock-protected buffer. Just have the per_cpu buffers
permanently allocated via a config option, and keep a fast and
non-intrusive tracing mechanism that can also handle tracing in NMIs.
It sacrifices some memory overhead, but developers don't mind that.

Signed-off-by: Steven Rostedt <rostedt@xxxxxxxxxxx>
---
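[ Note for reviewers, not part of the changelog: a rough sketch of the
  use case. The driver and handler below are made up; only
  trace_printk() itself is real.

	#include <linux/interrupt.h>
	#include <linux/kernel.h>
	#include <linux/smp.h>

	/* Illustrative debug-only tracing in a hot path */
	static irqreturn_t my_dev_irq(int irq, void *dev_id)
	{
		trace_printk("irq %d handled on cpu %d\n",
			     irq, smp_processor_id());
		return IRQ_HANDLED;
	}

  With the per_cpu buffers compiled in, each CPU formats into its own
  buffer, and a call from NMI context goes into a separate per-CPU NMI
  buffer, so nothing takes trace_buf_lock and NMI traces are not lost.
  The output shows up in the 'trace' file of the tracing debugfs
  directory (typically /sys/kernel/debug/tracing/trace). ]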
kernel/trace/Kconfig | 18 ++++++++++++++++++
kernel/trace/trace.c | 41 ++++++++++++++++++++++++++++++++++++-----
2 files changed, 54 insertions(+), 5 deletions(-)
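
Without this config nothing changes: the per_cpu buffers can still be
turned on at run time (patches 1 and 2 of this series) by booting with
trace_printk_percpu on the kernel command line, or by enabling the
trace_printk_percpu trace option under the tracing debugfs options
directory (typically /sys/kernel/debug/tracing/options/trace_printk_percpu).
This config just compiles the per_cpu choice in and drops the runtime
check between the two buffer schemes.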

diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 2ad39e5..18bd377 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -223,6 +223,24 @@ config SCHED_TRACER
This tracer tracks the latency of the highest priority task
to be scheduled in, starting from the point it has woken up.

+config TRACE_PRINTK_PERCPU
+ bool "Allocate trace_printk() per_cpu buffers"
+ depends on TRACING
+ help
+ By default trace_printk() uses a single 1KB buffer protected
+ by a spinlock. It may also lose trace data from NMIs, if an
+ NMI triggers on a CPU that is in the middle of recording a trace.
+
+ Without this config, the per_cpu buffers can be allocated
+ either by adding trace_printk_percpu to the kernel command
+ line, or by enabling the trace option trace_printk_percpu.
+
+ For developers that want it always enabled, this config makes
+ the per_cpu buffers permanent. It also removes the logic that
+ selects between the single and per_cpu buffers, lessening the
+ impact on tracing, at the added cost of more memory
+ overhead.
+
config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches and events"
depends on !GENERIC_TRACER
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c59ab4c..b245a4e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -338,7 +338,11 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
- TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
+ TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE
+#ifdef CONFIG_TRACE_PRINTK_PERCPU
+ | TRACE_ITER_PRINTK_PERCPU
+#endif
+ ;

static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);
@@ -1450,15 +1454,32 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)

#endif /* CONFIG_STACKTRACE */

-static char trace_buf[TRACE_BUF_SIZE];
-static arch_spinlock_t trace_buf_lock =
- (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-
/* created for use with alloc_percpu */
struct trace_buffer_struct {
char buffer[TRACE_BUF_SIZE];
};

+#ifdef CONFIG_TRACE_PRINTK_PERCPU
+static DEFINE_PER_CPU(struct trace_buffer_struct, trace_buffers);
+static DEFINE_PER_CPU(struct trace_buffer_struct, trace_nmi_buffers);
+
+static inline char *get_trace_buf(struct trace_array *tr, int *put)
+{
+ if (in_nmi())
+ return __get_cpu_var(trace_nmi_buffers).buffer;
+ else
+ return __get_cpu_var(trace_buffers).buffer;
+}
+
+static inline void put_trace_buf(struct trace_array *tr, int put)
+{
+}
+
+#else
+static char trace_buf[TRACE_BUF_SIZE];
+static arch_spinlock_t trace_buf_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+
static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

@@ -1526,6 +1547,7 @@ static void put_trace_buf(struct trace_array *tr, int put)
arch_spin_unlock(&trace_buf_lock);
atomic_dec_return(&data->disabled);
}
+#endif /* CONFIG_TRACE_PRINTK_PERCPU */

/**
* trace_vbprintk - write binary msg to tracing buffer
@@ -2708,6 +2730,7 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
return -EINVAL;
}

+#ifndef CONFIG_TRACE_PRINTK_PERCPU
static void alloc_percpu_trace_buffer(void)
{
struct trace_buffer_struct *buffers;
@@ -2752,6 +2775,14 @@ static void free_percpu_trace_buffer(void)
free_percpu(buffers);
free_percpu(nmi_buffers);
}
+#else
+static void alloc_percpu_trace_buffer(void) { }
+static void free_percpu_trace_buffer(void)
+{
+ /* trace_printk() buffers are permanently on */
+ trace_flags |= TRACE_ITER_PRINTK_PERCPU;
+}
+#endif

static void set_tracer_flags(unsigned int mask, int enabled)
{
--
1.7.6.3

