[GIT PULL] x86/timers changes for v4.8

From: Ingo Molnar
Date: Mon Jul 25 2016 - 11:42:05 EST


Linus,

Please pull the latest x86-timers-for-linus git tree from:

git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86-timers-for-linus

# HEAD: c48ec42d6eae08f55685ab660f0743ed33b9f22a x86/tsc: Remove the unused check_tsc_disabled()

The main change in this tree is the reworking, fixing and extension of the TSC
frequency enumeration code (by Len Brown).
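
For reference, the heart of the new enumeration is the CPUID 0x15 arithmetic in
native_calibrate_tsc() below: TSC kHz = crystal kHz * EBX / EAX, with a known
per-model crystal frequency substituted when ECX does not report one. A minimal,
illustrative user-space sketch of that computation (the __get_cpuid() harness is
mine, not part of the patch):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;

	/* Leaf 0x15: TSC/crystal ratio, plus optionally the crystal Hz */
	if (!__get_cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx))
		return 1;		/* leaf not supported on this CPU */

	if (!eax_denominator || !ebx_numerator)
		return 1;		/* ratio not enumerated */

	if (!ecx_hz) {
		/* the kernel falls back to per-model values, e.g. 24000 kHz on SKL */
		printf("crystal frequency not enumerated\n");
		return 1;
	}

	printf("TSC: %llu kHz\n",
	       (unsigned long long)(ecx_hz / 1000) * ebx_numerator / eax_denominator);
	return 0;
}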

Thanks,

Ingo

------------------>
Len Brown (9):
Revert "x86/tsc: Add missing Cherrytrail frequency to the table"
x86/tsc_msr: Identify Intel-specific code
x86/tsc_msr: Remove debugging messages
x86/tsc_msr: Update comments, expand definitions
x86/tsc_msr: Correct Silvermont reference clock values
x86/tsc_msr: Add Airmont reference clock values
x86/tsc_msr: Remove irqoff around MSR-based TSC enumeration
x86/tsc: Enumerate SKL cpu_khz and tsc_khz via CPUID
x86/tsc: Enumerate BXT tsc_khz via CPUID

Wei Jiangang (1):
x86/tsc: Remove the unused check_tsc_disabled()


arch/x86/include/asm/tsc.h | 5 +-
arch/x86/include/asm/x86_init.h | 4 +-
arch/x86/kernel/tsc.c | 100 +++++++++++++++++++++++++++++++++-------
arch/x86/kernel/tsc_msr.c | 68 +++++++++------------------
arch/x86/kernel/x86_init.c | 1 +
5 files changed, 110 insertions(+), 68 deletions(-)

diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 7428697c5b8d..33b6365c22fe 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -35,7 +35,7 @@ extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
-extern int check_tsc_disabled(void);
+extern unsigned long native_calibrate_cpu(void);
extern unsigned long native_calibrate_tsc(void);
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);

@@ -52,7 +52,6 @@ extern int notsc_setup(char *);
extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);

-/* MSR based TSC calibration for Intel Atom SoC platforms */
-unsigned long try_msr_calibrate_tsc(void);
+unsigned long cpu_khz_from_msr(void);

#endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 4dcdf74dfed8..08a08a800e17 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -181,7 +181,8 @@ struct x86_legacy_features {

/**
* struct x86_platform_ops - platform specific runtime functions
- * @calibrate_tsc: calibrate TSC
+ * @calibrate_cpu: calibrate CPU
+ * @calibrate_tsc: calibrate TSC, if different from CPU
* @get_wallclock: get time from HW clock like RTC etc.
* @set_wallclock: set time back to HW clock
* @is_untracked_pat_range exclude from PAT logic
@@ -200,6 +201,7 @@ struct x86_legacy_features {
* semantics.
*/
struct x86_platform_ops {
+ unsigned long (*calibrate_cpu)(void);
unsigned long (*calibrate_tsc)(void);
void (*get_wallclock)(struct timespec *ts);
int (*set_wallclock)(const struct timespec *ts);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 38ba6de56ede..a804b5ab32d0 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -239,7 +239,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
return ns;
}

-static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
+static void set_cyc2ns_scale(unsigned long khz, int cpu)
{
unsigned long long tsc_now, ns_now;
struct cyc2ns_data *data;
@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
local_irq_save(flags);
sched_clock_idle_sleep_event();

- if (!cpu_khz)
+ if (!khz)
goto done;

data = cyc2ns_write_begin(cpu);
@@ -261,7 +261,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
* time function is continuous; see the comment near struct
* cyc2ns_data.
*/
- clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, cpu_khz,
+ clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, khz,
NSEC_PER_MSEC, 0);

/*
@@ -335,12 +335,6 @@ int check_tsc_unstable(void)
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

-int check_tsc_disabled(void)
-{
- return tsc_disabled;
-}
-EXPORT_SYMBOL_GPL(check_tsc_disabled);
-
#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
@@ -665,19 +659,77 @@ static unsigned long quick_pit_calibrate(void)
}

/**
- * native_calibrate_tsc - calibrate the tsc on boot
+ * native_calibrate_tsc
+ * Determine TSC frequency via CPUID, else return 0.
*/
unsigned long native_calibrate_tsc(void)
{
+ unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
+ unsigned int crystal_khz;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return 0;
+
+ if (boot_cpu_data.cpuid_level < 0x15)
+ return 0;
+
+ eax_denominator = ebx_numerator = ecx_hz = edx = 0;
+
+ /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
+ cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
+
+ if (ebx_numerator == 0 || eax_denominator == 0)
+ return 0;
+
+ crystal_khz = ecx_hz / 1000;
+
+ if (crystal_khz == 0) {
+ switch (boot_cpu_data.x86_model) {
+ case 0x4E: /* SKL */
+ case 0x5E: /* SKL */
+ crystal_khz = 24000; /* 24.0 MHz */
+ break;
+ case 0x5C: /* BXT */
+ crystal_khz = 19200; /* 19.2 MHz */
+ break;
+ }
+ }
+
+ return crystal_khz * ebx_numerator / eax_denominator;
+}
+
+static unsigned long cpu_khz_from_cpuid(void)
+{
+ unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return 0;
+
+ if (boot_cpu_data.cpuid_level < 0x16)
+ return 0;
+
+ eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;
+
+ cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
+
+ return eax_base_mhz * 1000;
+}
+
+/**
+ * native_calibrate_cpu - calibrate the cpu on boot
+ */
+unsigned long native_calibrate_cpu(void)
+{
u64 tsc1, tsc2, delta, ref1, ref2;
unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
unsigned long flags, latch, ms, fast_calibrate;
int hpet = is_hpet_enabled(), i, loopmin;

- /* Calibrate TSC using MSR for Intel Atom SoCs */
- local_irq_save(flags);
- fast_calibrate = try_msr_calibrate_tsc();
- local_irq_restore(flags);
+ fast_calibrate = cpu_khz_from_cpuid();
+ if (fast_calibrate)
+ return fast_calibrate;
+
+ fast_calibrate = cpu_khz_from_msr();
if (fast_calibrate)
return fast_calibrate;

@@ -837,8 +889,12 @@ int recalibrate_cpu_khz(void)
if (!boot_cpu_has(X86_FEATURE_TSC))
return -ENODEV;

+ cpu_khz = x86_platform.calibrate_cpu();
tsc_khz = x86_platform.calibrate_tsc();
- cpu_khz = tsc_khz;
+ if (tsc_khz == 0)
+ tsc_khz = cpu_khz;
+ else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
+ cpu_khz = tsc_khz;
cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
cpu_khz_old, cpu_khz);

@@ -1244,8 +1300,18 @@ void __init tsc_init(void)
return;
}

+ cpu_khz = x86_platform.calibrate_cpu();
tsc_khz = x86_platform.calibrate_tsc();
- cpu_khz = tsc_khz;
+
+ /*
+ * Trust non-zero tsc_khz as authoritative,
+ * and use it to sanity check cpu_khz,
+ * which will be off if system timer is off.
+ */
+ if (tsc_khz == 0)
+ tsc_khz = cpu_khz;
+ else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
+ cpu_khz = tsc_khz;

if (!tsc_khz) {
mark_tsc_unstable("could not calculate TSC khz");
@@ -1265,7 +1331,7 @@ void __init tsc_init(void)
*/
for_each_possible_cpu(cpu) {
cyc2ns_init(cpu);
- set_cyc2ns_scale(cpu_khz, cpu);
+ set_cyc2ns_scale(tsc_khz, cpu);
}

if (tsc_disabled > 0)
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 9911a0620f9a..0fe720d64fef 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -1,14 +1,5 @@
/*
- * tsc_msr.c - MSR based TSC calibration on Intel Atom SoC platforms.
- *
- * TSC in Intel Atom SoC runs at a constant rate which can be figured
- * by this formula:
- * <maximum core-clock to bus-clock ratio> * <maximum resolved frequency>
- * See Intel 64 and IA-32 System Programming Guid section 16.12 and 30.11.5
- * for details.
- * Especially some Intel Atom SoCs don't have PIT(i8254) or HPET, so MSR
- * based calibration is the only option.
- *
+ * tsc_msr.c - TSC frequency enumeration via MSR
*
* Copyright (C) 2013 Intel Corporation
* Author: Bin Gao <bin.gao@xxxxxxxxx>
@@ -22,18 +13,10 @@
#include <asm/apic.h>
#include <asm/param.h>

-/* CPU reference clock frequency: in KHz */
-#define FREQ_80 80000
-#define FREQ_83 83200
-#define FREQ_100 99840
-#define FREQ_133 133200
-#define FREQ_166 166400
-
-#define MAX_NUM_FREQS 8
+#define MAX_NUM_FREQS 9

/*
- * According to Intel 64 and IA-32 System Programming Guide,
- * if MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be
+ * If MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be
* read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40].
* Unfortunately some Intel Atom SoCs aren't quite compliant to this,
* so we need manually differentiate SoC families. This is what the
@@ -48,17 +31,18 @@ struct freq_desc {

static struct freq_desc freq_desc_tables[] = {
/* PNW */
- { 6, 0x27, 0, { 0, 0, 0, 0, 0, FREQ_100, 0, FREQ_83 } },
+ { 6, 0x27, 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 } },
/* CLV+ */
- { 6, 0x35, 0, { 0, FREQ_133, 0, 0, 0, FREQ_100, 0, FREQ_83 } },
- /* TNG */
- { 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } },
- /* VLV2 */
- { 6, 0x37, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
- /* ANN */
- { 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } },
- /* AIRMONT */
- { 6, 0x4c, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, FREQ_80, 0, 0, 0 } },
+ { 6, 0x35, 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 } },
+ /* TNG - Intel Atom processor Z3400 series */
+ { 6, 0x4a, 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 } },
+ /* VLV2 - Intel Atom processor E3000, Z3600, Z3700 series */
+ { 6, 0x37, 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 } },
+ /* ANN - Intel Atom processor Z3500 series */
+ { 6, 0x5a, 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 } },
+ /* AMT - Intel Atom processor X7-Z8000 and X5-Z8000 series */
+ { 6, 0x4c, 1, { 83300, 100000, 133300, 116700,
+ 80000, 93300, 90000, 88900, 87500 } },
};

static int match_cpu(u8 family, u8 model)
@@ -79,16 +63,20 @@ static int match_cpu(u8 family, u8 model)
(freq_desc_tables[cpu_index].freqs[freq_id])

/*
- * Do MSR calibration only for known/supported CPUs.
+ * MSR-based CPU/TSC frequency discovery for certain CPUs.
*
- * Returns the calibration value or 0 if MSR calibration failed.
+ * Set global "lapic_timer_frequency" to bus_clock_cycles/jiffy
+ * Return processor base frequency in KHz, or 0 on failure.
*/
-unsigned long try_msr_calibrate_tsc(void)
+unsigned long cpu_khz_from_msr(void)
{
u32 lo, hi, ratio, freq_id, freq;
unsigned long res;
int cpu_index;

+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return 0;
+
cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
if (cpu_index < 0)
return 0;
@@ -100,31 +88,17 @@ unsigned long try_msr_calibrate_tsc(void)
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
ratio = (hi >> 8) & 0x1f;
}
- pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio);
-
- if (!ratio)
- goto fail;

/* Get FSB FREQ ID */
rdmsr(MSR_FSB_FREQ, lo, hi);
freq_id = lo & 0x7;
freq = id_to_freq(cpu_index, freq_id);
- pr_info("Resolved frequency ID: %u, frequency: %u KHz\n",
- freq_id, freq);
- if (!freq)
- goto fail;

/* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
res = freq * ratio;
- pr_info("TSC runs at %lu KHz\n", res);

#ifdef CONFIG_X86_LOCAL_APIC
lapic_timer_frequency = (freq * 1000) / HZ;
- pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency);
#endif
return res;
-
-fail:
- pr_warn("Fast TSC calibration using MSR failed\n");
- return 0;
}
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index dad5fe9633a3..58b459296e13 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -92,6 +92,7 @@ static void default_nmi_init(void) { };
static int default_i8042_detect(void) { return 1; };

struct x86_platform_ops x86_platform = {
+ .calibrate_cpu = native_calibrate_cpu,
.calibrate_tsc = native_calibrate_tsc,
.get_wallclock = mach_get_cmos_time,
.set_wallclock = mach_set_rtc_mmss,