[RFC PATCH 23/26] x86/kvm: Add worker to trigger runtime patching

From: Ankur Arora
Date: Wed Apr 08 2020 - 01:06:21 EST


Make __pv_init_lock_hash() conditional on paravirt spinlocks either
being enabled (via kvm_pv_spinlock()) or possibly getting enabled
later via runtime patching (CONFIG_PARAVIRT_RUNTIME).

Also add a CPUID reprobe handler which can trigger this patching.

Signed-off-by: Ankur Arora <ankur.a.arora@xxxxxxxxxx>
---
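Note (not part of the commit message): a minimal sketch of how the
reprobe handler added below might be wired up as a worker. The actual
trigger lives elsewhere in the series; kvm_cpuid_reprobe_work and
kvm_cpuid_changed() are illustrative names only, not code from this
patch.

#include <linux/workqueue.h>

/* Illustrative only: bind the handler from this patch to a work item. */
static DECLARE_WORK(kvm_cpuid_reprobe_work, kvm_trigger_reprobe_cpuid);

/*
 * Hypothetical caller: whatever notices a CPUID/feature change
 * (e.g. after migration) kicks the worker, which then re-stages and
 * re-patches the pv-ops under text_mutex.
 */
static void kvm_cpuid_changed(void)
{
	schedule_work(&kvm_cpuid_reprobe_work);
}
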
arch/x86/kernel/kvm.c | 34 +++++++++++++++++++++++++++++-----
1 file changed, 29 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 31f5ecfd3907..1cb7eab805a6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -35,6 +35,7 @@
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
+#include <asm/text-patching.h>

static int kvmapf = 1;

@@ -909,12 +910,15 @@ void __init kvm_spinlock_init(void)
if (num_possible_cpus() == 1)
return;

- if (!kvm_pv_spinlock())
- return;
-
- __pv_init_lock_hash();
+ /*
+ * Allocate if pv_spinlocks are enabled or if we might
+ * end up patching them in later.
+ */
+ if (kvm_pv_spinlock() || IS_ENABLED(CONFIG_PARAVIRT_RUNTIME))
+ __pv_init_lock_hash();
}
-
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static inline bool kvm_pv_spinlock(void) { return false; }
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
@@ -952,3 +956,23 @@ void arch_haltpoll_disable(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif
+
+#ifdef CONFIG_PARAVIRT_RUNTIME
+void kvm_trigger_reprobe_cpuid(struct work_struct *work)
+{
+ mutex_lock(&text_mutex);
+
+ paravirt_stage_zero();
+
+ kvm_pv_steal_clock();
+ kvm_pv_tlb();
+ paravirt_runtime_patch(false);
+
+ paravirt_stage_zero();
+
+ kvm_pv_spinlock();
+ paravirt_runtime_patch(true);
+
+ mutex_unlock(&text_mutex);
+}
+#endif /* CONFIG_PARAVIRT_RUNTIME */
--
2.20.1