[tip:x86/timers] x86/tsc: Verify TSC_ADJUST from idle

From: tip-bot for Thomas Gleixner
Date: Tue Nov 29 2016 - 11:52:05 EST


Commit-ID: cea7d48122b134e64d9617cc0a0ad5d53a407ea9
Gitweb: http://git.kernel.org/tip/cea7d48122b134e64d9617cc0a0ad5d53a407ea9
Author: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
AuthorDate: Sat, 19 Nov 2016 13:47:37 +0000
Committer: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
CommitDate: Tue, 29 Nov 2016 17:29:47 +0100

x86/tsc: Verify TSC_ADJUST from idle

When entering idle, it's a good opportunity to verify that the TSC_ADJUST
MSR has not been tampered with (e.g. by a BIOS hiding SMM cycles). If
tampering is detected, emit a warning and restore the previous value.

This is especially important for machines which mark the TSC reliable
because no watchdog clocksource is available (e.g. SoCs).

This is not sufficient for HPC (NOHZ_FULL) situations, where a CPU never
goes idle, but adding a timer to do the check periodically is not an option
either. On a machine which has this issue, the check triggers right during
boot, so there is a decent chance that the sysadmin will notice.

Rate limit the check to once per second and warn only once per CPU.
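Not part of this patch, just for illustration: the MSR in question can also
be observed from user space, which is handy for confirming that firmware is
writing to it behind the kernel's back. A minimal sketch, assuming the msr
driver is loaded (modprobe msr) and root privileges:

/*
 * Hypothetical user-space sketch (not from this patch): read
 * IA32_TSC_ADJUST (MSR 0x3b) on CPU 0 through the msr driver.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define IA32_TSC_ADJUST 0x3b

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* The msr driver takes the MSR address as the file offset */
	if (pread(fd, &val, sizeof(val), IA32_TSC_ADJUST) != sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	/* The MSR holds a signed 64-bit adjustment value */
	printf("TSC_ADJUST: %lld\n", (long long)(int64_t)val);
	close(fd);
	return 0;
}

Polling this across suspend/resume or SMM-heavy workloads would show the
value changing if firmware is indeed writing the MSR.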

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Reviewed-by: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Yinghai Lu <yinghai@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Link: http://lkml.kernel.org/r/20161119134017.732180441@xxxxxxxxxxxxx
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

---
arch/x86/include/asm/tsc.h | 2 ++
arch/x86/kernel/process.c | 1 +
arch/x86/kernel/tsc_sync.c | 37 +++++++++++++++++++++++++++++++++++--
3 files changed, 38 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 1ec0595..b896e9e 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -50,8 +50,10 @@ extern void check_tsc_sync_target(void);

#ifdef CONFIG_X86_TSC
extern void tsc_store_and_check_tsc_adjust(void);
+extern void tsc_verify_tsc_adjust(void);
#else
static inline void tsc_store_and_check_tsc_adjust(void) { }
+static inline void tsc_verify_tsc_adjust(void) { }
#endif

extern int notsc_setup(char *);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 0888a87..4fe5dc8 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -277,6 +277,7 @@ void exit_idle(void)

void arch_cpu_idle_enter(void)
{
+	tsc_verify_tsc_adjust();
 	local_touch_nmi();
 	enter_idle();
}
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index e4b2c04..f9c291e 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -22,12 +22,42 @@
#include <asm/tsc.h>

struct tsc_adjust {
-	s64	bootval;
-	s64	adjusted;
+	s64		bootval;
+	s64		adjusted;
+	unsigned long	nextcheck;
+	bool		warned;
};

static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);

+void tsc_verify_tsc_adjust(void)
+{
+	struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
+	s64 curval;
+
+	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
+		return;
+
+	/* Rate limit the MSR check */
+	if (time_before(jiffies, adj->nextcheck))
+		return;
+
+	adj->nextcheck = jiffies + HZ;
+
+	rdmsrl(MSR_IA32_TSC_ADJUST, curval);
+	if (adj->adjusted == curval)
+		return;
+
+	/* Restore the original value */
+	wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);
+
+	if (!adj->warned) {
+		pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
+			smp_processor_id(), adj->adjusted, curval);
+		adj->warned = true;
+	}
+}
+
#ifndef CONFIG_SMP
void __init tsc_store_and_check_tsc_adjust(void)
{
@@ -40,6 +70,7 @@ void __init tsc_store_and_check_tsc_adjust(void)
 	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
 	cur->bootval = bootval;
 	cur->adjusted = bootval;
+	cur->nextcheck = jiffies + HZ;
 	pr_info("TSC ADJUST: Boot CPU%u: %lld\n", cpu, bootval);
}

@@ -59,6 +90,8 @@ void tsc_store_and_check_tsc_adjust(void)

 	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
 	cur->bootval = bootval;
+	cur->nextcheck = jiffies + HZ;
+	cur->warned = false;
 
 	/*
 	 * Check whether this CPU is the first in a package to come up. In