[RFC][PATCHv6 11/12] printk: add offloading watchdog API

From: Sergey Senozhatsky
Date: Mon Dec 04 2017 - 08:50:00 EST


Introduce the printk_offloading watchdog API to control the behaviour
of offloading. Some control paths deliberately disable the
soft-lockup watchdog by calling touch_all_softlockup_watchdogs().
One such example is sysrq-t:

__handle_sysrq()
sysrq_handle_showstate()
show_state()
show_state_filter()
touch_all_softlockup_watchdogs()
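
For reference, a simplified sketch of show_state_filter() (based on
kernel/sched/core.c; printout details trimmed, the exact code differs
between kernel versions):

void show_state_filter(unsigned long state_filter)
{
        struct task_struct *g, *p;

        rcu_read_lock();
        for_each_process_thread(g, p) {
                /*
                 * Listing all tasks on a slow console can take a long
                 * time, so the NMI and soft-lockup watchdogs are reset
                 * on every iteration.
                 */
                touch_nmi_watchdog();
                touch_all_softlockup_watchdogs();
                if (state_filter_match(state_filter, p))
                        sched_show_task(p);
        }
        rcu_read_unlock();
}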

This control path deliberately and forcibly silences the watchdog
for various reasons, one of which is the fact that sysrq-t may be
invoked when the system is in a bad condition and we need to print
backtraces as soon as possible. The argument here might be that
"in this case calling into the scheduler from printk offloading
may be dangerous and in general should be avoided".

But this argument does not really hold: we can already call into
the scheduler from sysrq, simply because every time we call
printk() from the show_state() loop we end up in up():

__handle_sysrq()
sysrq_handle_showstate()
show_state()
show_state_filter()
printk()
console_unlock()
up()
wake_up_process()
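
That is, console_unlock() releases console_sem via up(), and the
semaphore slow path wakes up the next console_sem waiter. Roughly
(simplified excerpt from kernel/locking/semaphore.c):

static noinline void __sched __up(struct semaphore *sem)
{
        struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
                        struct semaphore_waiter, list);

        list_del(&waiter->list);
        waiter->up = true;
        /* printk() thus already ends up in the scheduler here */
        wake_up_process(waiter->task);
}

void up(struct semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(list_empty(&sem->wait_list)))
                sem->count++;
        else
                __up(sem);      /* there is a waiter: wake it up */
        raw_spin_unlock_irqrestore(&sem->lock, flags);
}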

So offloading to printk_kthread does not add anything new to the
picture. It does, however, change the behaviour of sysrq in some
corner cases, and we have regression reports. The problem is that
sysrq effectively "flushes all pending logbuf messages" before it
actually handles the sysrq event: all those pr_info() and
pr_cont() calls in __handle_sysrq() print synchronously.
Offloading to the printk kthread lets sysrq handle the event
before the logbuf entries are flushed, so emergency_restart(),
for instance, may lead to missing kernel logs.
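
A heavily simplified view of __handle_sysrq() (drivers/tty/sysrq.c;
locking, loglevel switching and enable-mask checks omitted):

void __handle_sysrq(int key, bool check_mask)
{
        struct sysrq_key_op *op_p;

        pr_info("SysRq : ");
        op_p = __sysrq_get_key_op(key);
        if (op_p) {
                pr_cont("%s\n", op_p->action_msg);
                /*
                 * With synchronous printk the two messages above reach
                 * the consoles before the handler runs. With offloading
                 * the handler, e.g. emergency_restart() for sysrq-b,
                 * can run before printk_kthread flushes the logbuf.
                 */
                op_p->handler(key);
        }
}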

The thing to notice is that such a "flush logbuf from sysrq"
behaviour has never been guaranteed and is, in fact, surprising,
to say the least. For example, emergency_restart() probably
should not call into any subsystems at all and should just reboot
the kernel. But we have what we have, and with this patch we are
merely trying to preserve the existing behaviour.

This patch adds a touch_printk_offloading_watchdog() call to the
watchdog's touch_softlockup_watchdog(), so each time a control
path resets the watchdog it also resets the printk offloading
counter, effectively disabling printk offloading.
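
To illustrate the intended effect (do_chunk_of_slow_work() below is a
made-up placeholder, not an existing kernel function): any code path
that periodically touches the soft-lockup watchdog now also keeps the
printk offloading counter at zero, so printing stays synchronous for
that stretch:

        while (do_chunk_of_slow_work()) {
                /*
                 * Resets the soft-lockup watchdog and, with this patch,
                 * also zeroes this CPU's printk offloading counter, so
                 * should_handoff_printing() will not offload to
                 * printk_kthread from this context.
                 */
                touch_softlockup_watchdog();
        }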

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@xxxxxxxxx>
---
include/linux/console.h | 2 ++
kernel/printk/printk.c | 27 ++++++++++++++++++++++++---
kernel/watchdog.c | 6 +++++-
3 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/include/linux/console.h b/include/linux/console.h
index 8ce29b2381d2..7408a345f4b1 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -191,6 +191,8 @@ extern void printk_emergency_begin(void);
extern void printk_emergency_end(void);
extern int printk_emergency_begin_sync(void);
extern int printk_emergency_end_sync(void);
+extern void touch_printk_offloading_watchdog(void);
+extern void touch_printk_offloading_watchdog_on_cpu(unsigned int cpu);

int mda_console_init(void);
void prom_con_init(void);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 2f9697c71cf1..2a1ec075cc13 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -460,6 +460,9 @@ static atomic_t printk_emergency __read_mostly;
*/
static bool offloading_enabled;

+/* How long this CPU has spent in console_unlock() */
+static DEFINE_PER_CPU(u64, printing_elapsed);
+
module_param_named(offloading_enabled, offloading_enabled, bool, 0644);
MODULE_PARM_DESC(offloading_enabled,
"enable/disable print out offloading to printk kthread");
@@ -553,6 +556,23 @@ static inline int offloading_threshold(void)
return min(10, timeout);
}

+/*
+ * Must be called by the watchdog. When a control path calls
+ * touch_all_softlockup_watchdogs() or touch_softlockup_watchdog()
+ * to silence the watchdog, we also need to reset the printk
+ * offloading counter in order to avoid printk offloading from a
+ * potentially unsafe context.
+ */
+void touch_printk_offloading_watchdog(void)
+{
+ this_cpu_write(printing_elapsed, 0);
+}
+
+void touch_printk_offloading_watchdog_on_cpu(unsigned int cpu)
+{
+ per_cpu(printing_elapsed, cpu) = 0;
+}
+
/*
* Under heavy printing load or with a slow serial console (or both)
* console_unlock() can stall CPUs, which can result in soft/hard-lockups,
@@ -564,7 +584,6 @@ static inline int offloading_threshold(void)
*/
static inline bool should_handoff_printing(u64 printing_start_ts)
{
- static DEFINE_PER_CPU(u64, printing_elapsed);
static struct task_struct *printing_task;
u64 now = local_clock();
bool emergency = !printk_offloading_enabled();
@@ -578,7 +597,7 @@ static inline bool should_handoff_printing(u64 printing_start_ts)

/* A new task - reset the counters. */
if (printing_task != current) {
- __this_cpu_write(printing_elapsed, 0);
+ touch_printk_offloading_watchdog();
printing_task = current;
return false;
}
@@ -611,7 +630,7 @@ static inline bool should_handoff_printing(u64 printing_start_ts)
* `offloading_threshold()' time slice.
*/
for_each_possible_cpu(cpu)
- per_cpu(printing_elapsed, cpu) = 0;
+ touch_printk_offloading_watchdog_on_cpu(cpu);
return true;
}

@@ -2083,6 +2102,8 @@ EXPORT_SYMBOL_GPL(printk_emergency_end_sync);

static bool should_handoff_printing(u64 printing_start_ts) { return false; }
static bool printk_offloading_enabled(void) { return false; }
+void touch_printk_offloading_watchdog(void) {}
+void touch_printk_offloading_watchdog_on_cpu(unsigned int cpu) {}
#endif /* CONFIG_PRINTK */

#ifdef CONFIG_EARLY_PRINTK
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 576d18045811..27b7ce1088c7 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -26,6 +26,7 @@
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
+#include <linux/console.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
@@ -277,6 +278,7 @@ void touch_softlockup_watchdog_sched(void)

void touch_softlockup_watchdog(void)
{
+ touch_printk_offloading_watchdog();
touch_softlockup_watchdog_sched();
wq_watchdog_touch(raw_smp_processor_id());
}
@@ -295,8 +297,10 @@ void touch_all_softlockup_watchdogs(void)
* update as well, the only side effect might be a cycle delay for
* the softlockup check.
*/
- for_each_cpu(cpu, &watchdog_allowed_mask)
+ for_each_cpu(cpu, &watchdog_allowed_mask) {
per_cpu(watchdog_touch_ts, cpu) = 0;
+ touch_printk_offloading_watchdog_on_cpu(cpu);
+ }
wq_watchdog_touch(-1);
}

--
2.15.1