[patch 11/24] x86/speculation: Rework SMT state change

From: Thomas Gleixner
Date: Wed Nov 21 2018 - 15:19:47 EST


arch_smt_update() is only called when the sysfs SMT control knob is
changed. This means that when SMT is enabled in the sysfs control knob,
the system is considered to have SMT active even if all siblings are offline.

To allow fine-grained control of the speculation mitigations, the actual SMT
state is more interesting than the fact that siblings could be enabled.

Rework the code, so arch_smt_update() is invoked from each individual CPU
hotplug function, and simplify the update function while at it.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
arch/x86/kernel/cpu/bugs.c | 11 +++++------
include/linux/sched/topology.h | 4 ++++
kernel/cpu.c | 14 ++++++++------
3 files changed, 17 insertions(+), 12 deletions(-)

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
+#include <linux/sched/topology.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
@@ -344,16 +345,14 @@ void arch_smt_update(void)
return;

mutex_lock(&spec_ctrl_mutex);
- mask = x86_spec_ctrl_base;
- if (cpu_smt_control == CPU_SMT_ENABLED)
+
+ mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+ if (sched_smt_active())
mask |= SPEC_CTRL_STIBP;
- else
- mask &= ~SPEC_CTRL_STIBP;

if (mask != x86_spec_ctrl_base) {
pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
- cpu_smt_control == CPU_SMT_ENABLED ?
- "Enabling" : "Disabling");
+ mask & SPEC_CTRL_STIBP ? "Enabling" : "Disabling");
x86_spec_ctrl_base = mask;
on_each_cpu(update_stibp_msr, NULL, 1);
}
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -226,8 +226,12 @@ static inline bool cpus_share_cache(int
return true;
}

+static inline bool sched_smt_active(void) { return false; }
+
#endif /* !CONFIG_SMP */

+void arch_smt_update(void);
+
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -367,6 +367,12 @@ static void lockdep_release_cpus_lock(vo

#endif /* CONFIG_HOTPLUG_CPU */

+/*
+ * Architectures that need SMT-specific errata handling during SMT hotplug
+ * should override this.
+ */
+void __weak arch_smt_update(void) { }
+
#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
EXPORT_SYMBOL_GPL(cpu_smt_control);
@@ -1011,6 +1017,7 @@ static int __ref _cpu_down(unsigned int
* concurrent CPU hotplug via cpu_add_remove_lock.
*/
lockup_detector_cleanup();
+ arch_smt_update();
return ret;
}

@@ -1139,6 +1146,7 @@ static int _cpu_up(unsigned int cpu, int
ret = cpuhp_up_callbacks(cpu, st, target);
out:
cpus_write_unlock();
+ arch_smt_update();
return ret;
}

@@ -2055,12 +2063,6 @@ static void cpuhp_online_cpu_device(unsi
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}

-/*
- * Architectures that need SMT-specific errata handling during SMT hotplug
- * should override this.
- */
-void __weak arch_smt_update(void) { };
-
static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
int cpu, ret = 0;