[PATCH 8/9] sched/clock: Use late_initcall() instead of sched_init_smp()

From: Peter Zijlstra
Date: Fri Apr 21 2017 - 13:08:40 EST


Core2 marks its TSC unstable in ACPI Processor Idle, which is probed
after sched_init_smp(). Luckily, it appears both acpi_processor and
intel_idle (which has a similar check) can only be built-in, never modular.

This means we can delay switching to stable until after these drivers
have run (if they were modules, this would be impossible).

Delay the stable switch to late_initcall() to allow these drivers to
mark the TSC unstable and avoid difficult stable->unstable transitions.

Cc: "Rafael J . Wysocki" <rafael.j.wysocki@xxxxxxxxx>
Reported-by: "Lofstedt, Marta" <marta.lofstedt@xxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
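For reference, a minimal sketch (not part of this patch) of the driver-side
ordering the above relies on: for built-in code, module_init() maps to
device_initcall() (level 6), which runs before late_initcall() (level 7), so
any mark_tsc_unstable() call from these drivers has already happened by the
time sched_clock_init_late() runs. The driver name below is made up purely
for illustration.

#include <linux/init.h>
#include <asm/tsc.h>

/* Hypothetical built-in idle driver; stands in for acpi_processor/intel_idle. */
static int __init example_idle_init(void)
{
	/* Deep C-states stop the TSC on this hardware. */
	mark_tsc_unstable("TSC halts in idle");
	return 0;
}
device_initcall(example_idle_init);	/* level 6, runs before late_initcall() */
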
include/linux/sched/clock.h | 5 -----
kernel/sched/clock.c | 10 +++++++++-
kernel/sched/core.c | 2 --
3 files changed, 9 insertions(+), 8 deletions(-)

--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h
@@ -23,10 +23,6 @@ extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init_late(void)
-{
-}
-
static inline void sched_clock_tick(void)
{
}
@@ -53,7 +49,6 @@ static inline u64 local_clock(void)
return sched_clock();
}
#else
-extern void sched_clock_init_late(void);
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -64,6 +64,7 @@
#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/tick.h>
+#include <linux/init.h>

/*
* Scheduler clock - returns current time in nanosec units.
@@ -203,7 +204,11 @@ void clear_sched_clock_stable(void)
__clear_sched_clock_stable();
}

-void sched_clock_init_late(void)
+/*
+ * We run this as late_initcall() such that it runs after all built-in drivers,
+ * notably: acpi_processor and intel_idle, which can mark the TSC as unstable.
+ */
+static int __init sched_clock_init_late(void)
{
sched_clock_running = 2;
/*
@@ -217,7 +222,10 @@ void sched_clock_init_late(void)

if (__sched_clock_stable_early)
__set_sched_clock_stable();
+
+ return 0;
}
+late_initcall(sched_clock_init_late);

/*
* min, max except they take wrapping into account
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5962,7 +5962,6 @@ void __init sched_init_smp(void)
init_sched_dl_class();

sched_init_smt();
- sched_clock_init_late();

sched_smp_initialized = true;
}
@@ -5978,7 +5977,6 @@ early_initcall(migration_init);
void __init sched_init_smp(void)
{
sched_init_granularity();
- sched_clock_init_late();
}
#endif /* CONFIG_SMP */