[PATCH v2] trace: Add migrate-disabled counter to tracing output.

From: Sebastian Andrzej Siewior
Date: Tue Aug 10 2021 - 09:26:39 EST


From: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

migrate_disable() forbids task migration to another CPU. It has been
available since v5.11 and already has users such as highmem and BPF. It
is useful to observe this task state in tracing, which already records
similar state such as the preemption counter.

Instead of adding the migrate-disable counter as a new member of struct
trace_entry, which would extend the whole struct by four bytes, it is
squashed into the byte holding the preempt-disable counter. The lower
four bits hold the preemption counter, the upper four bits hold the
migrate-disable counter. Neither counter should exceed 15, but if one
does, a safety net caps the recorded value at 15.
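
A standalone sketch of the encoding and decoding, assuming nothing
beyond plain C (the names are made up for the example; in the patch the
encoding is done by tracing_gen_ctx_irq_test() and the decoding by
trace_print_lat_fmt(), see the diff below):

#include <stdio.h>

/* Cap each counter at 15 so it fits into one nibble of the shared byte. */
static unsigned char pack_counts(unsigned int preempt, unsigned int migrate)
{
        if (preempt > 0xf)
                preempt = 0xf;
        if (migrate > 0xf)
                migrate = 0xf;
        /* lower nibble: preemption counter, upper nibble: migrate-disable */
        return (unsigned char)((migrate << 4) | preempt);
}

int main(void)
{
        unsigned char packed = pack_counts(3, 1);

        /* Decoding is the reverse: mask for preempt, shift for migrate-disable. */
        printf("preempt=%u migrate=%u\n", packed & 0xf, packed >> 4);
        return 0;
}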

Add the migrate-disable counter to the trace entry so it shows up in the
trace. Due to the users mentioned above, it is already possible to
observe it:

| bash-1108 [000] ...21 73.950578: rss_stat: mm_id=2213312838 curr=0 type=MM_ANONPAGES size=8192B
| bash-1108 [000] d..31 73.951222: irq_disable: caller=flush_tlb_mm_range+0x115/0x130 parent=ptep_clear_flush+0x42/0x50
| bash-1108 [000] d..31 73.951222: tlb_flush: pages:1 reason:local mm shootdown (3)

The last digit is the migrate-disable counter, the digit before it the
preemption counter. In the "d..31" lines above, interrupts are disabled,
the preemption counter is 3 and the migrate-disable counter is 1.

Things that popped up:
- trace_print_lat_context() does not print the migrate counter. Not sure
  if it should. It is used in "verbose" mode, prints the counter with
  eight digits, and I'm not sure whether anything processes that value.

- trace_define_common_fields() now defines a differently named field.
  This probably breaks things. No idea what to do in order to preserve
  the old behaviour. Since the field is used for filtering, it should be
  split somehow so that both nibbles can be matched; see the sketch after
  this list.
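
Purely as an illustration of the "split" idea from the last item (these
accessor names are hypothetical and this is not how the event-filter
code is wired up), the two values could be derived from the packed byte
like this:

/* Hypothetical accessors, only to illustrate matching the two nibbles
 * separately; how to expose them to the event filter is still open.
 */
static inline unsigned char common_preempt_count(unsigned char preempt_mg_count)
{
        return preempt_mg_count & 0x0f; /* lower nibble: preemption counter */
}

static inline unsigned char common_migrate_disable(unsigned char preempt_mg_count)
{
        return preempt_mg_count >> 4;   /* upper nibble: migrate-disable counter */
}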

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
[bigeasy: patch description.]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
---
v1…v2:
- use the upper nibble of the preempt_count byte for the migrate-disable
  counter and the lower nibble for the preemption counter. This avoids
  extending the struct by four bytes due to alignment / padding; see the
  size comparison below.
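
To see why, a throwaway userspace size comparison (local struct copies
only for illustration, not the kernel definitions):

#include <stdio.h>

struct entry_old {              /* current struct trace_entry layout */
        unsigned short type;
        unsigned char flags;
        unsigned char preempt_count;
        int pid;
};

struct entry_extended {         /* with a hypothetical extra member */
        unsigned short type;
        unsigned char flags;
        unsigned char preempt_count;
        unsigned char migrate_disable;
        int pid;
};

int main(void)
{
        /* Typically prints 8 vs. 12: the int needs 4-byte alignment, so the
         * extra char costs a whole word of padding. */
        printf("old=%zu extended=%zu\n",
               sizeof(struct entry_old), sizeof(struct entry_extended));
        return 0;
}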

include/linux/trace_events.h | 4 ++--
kernel/trace/trace.c | 26 +++++++++++++++++++-------
kernel/trace/trace_events.c | 3 ++-
kernel/trace/trace_output.c | 11 ++++++++---
4 files changed, 31 insertions(+), 13 deletions(-)

diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index ad413b382a3ca..92680a59740e4 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -67,7 +67,7 @@ void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
struct trace_entry {
unsigned short type;
unsigned char flags;
- unsigned char preempt_count;
+ unsigned char preempt_mg_count;
int pid;
};

@@ -156,7 +156,7 @@ static inline void tracing_generic_entry_update(struct trace_entry *entry,
unsigned short type,
unsigned int trace_ctx)
{
- entry->preempt_count = trace_ctx & 0xff;
+ entry->preempt_mg_count = trace_ctx & 0xff;
entry->pid = current->pid;
entry->type = type;
entry->flags = trace_ctx >> 16;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c59dd35a6da5c..ea9082fdcc69a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2603,6 +2603,15 @@ enum print_line_t trace_handle_return(struct trace_seq *s)
}
EXPORT_SYMBOL_GPL(trace_handle_return);

+static unsigned short migration_disable_value(void)
+{
+#if defined(CONFIG_SMP)
+ return current->migration_disabled;
+#else
+ return 0;
+#endif
+}
+
unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
{
unsigned int trace_flags = irqs_status;
@@ -2621,7 +2630,8 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
trace_flags |= TRACE_FLAG_NEED_RESCHED;
if (test_preempt_need_resched())
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
- return (trace_flags << 16) | (pc & 0xff);
+ return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
+ (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
}

struct ring_buffer_event *
@@ -4177,9 +4187,10 @@ static void print_lat_help_header(struct seq_file *m)
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
- "# |||| / delay \n"
- "# cmd pid ||||| time | caller \n"
- "# \\ / ||||| \\ | / \n");
+ "# |||| / _-=> migrate-disable \n"
+ "# ||||| / delay \n"
+ "# cmd pid |||||| time | caller \n"
+ "# \\ / |||||| \\ | / \n");
}

static void print_event_info(struct array_buffer *buf, struct seq_file *m)
@@ -4217,9 +4228,10 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
- seq_printf(m, "# %.*s||| / delay\n", prec, space);
- seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
- seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
+ seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
+ seq_printf(m, "# %.*s|||| / delay\n", prec, space);
+ seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
+ seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
}

void
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 80e96989770ed..5817d14e8095f 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -181,7 +181,8 @@ static int trace_define_common_fields(void)

__common_field(unsigned short, type);
__common_field(unsigned char, flags);
- __common_field(unsigned char, preempt_count);
+ /* XXX */
+ __common_field(unsigned char, preempt_mg_count);
__common_field(int, pid);

return ret;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index a0bf446bb0348..def0d8de2df6f 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -492,8 +492,13 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
trace_seq_printf(s, "%c%c%c",
irqs_off, need_resched, hardsoft_irq);

- if (entry->preempt_count)
- trace_seq_printf(s, "%x", entry->preempt_count);
+ if (entry->preempt_mg_count & 0xf)
+ trace_seq_printf(s, "%x", entry->preempt_mg_count & 0xf);
+ else
+ trace_seq_putc(s, '.');
+
+ if (entry->preempt_mg_count & 0xf0)
+ trace_seq_printf(s, "%x", entry->preempt_mg_count >> 4);
else
trace_seq_putc(s, '.');

@@ -656,7 +661,7 @@ int trace_print_lat_context(struct trace_iterator *iter)
trace_seq_printf(
s, "%16s %7d %3d %d %08x %08lx ",
comm, entry->pid, iter->cpu, entry->flags,
- entry->preempt_count, iter->idx);
+ entry->preempt_mg_count & 0xf, iter->idx);
} else {
lat_print_generic(s, entry, iter->cpu);
}
--
2.32.0