[PATCH v9 15/18] KVM: x86: Add guest support for detecting and enabling SEV Live Migration feature.

From: Ashish Kalra
Date: Tue Dec 08 2020 - 17:10:01 EST


From: Ashish Kalra <ashish.kalra@xxxxxxx>

The guest support for detecting and enabling the SEV live migration
feature uses the following logic:

- kvm_init_platform() checks whether the guest is booted under EFI.

- If not EFI,

i) Check for the KVM_FEATURE_SEV_LIVE_MIGRATION CPUID feature.

ii) If CPUID reports that live migration is supported, issue a wrmsrl()
to enable SEV live migration support.

- If EFI,

i) Check for the KVM_FEATURE_SEV_LIVE_MIGRATION CPUID feature.

ii) If CPUID reports that live migration is supported, read the UEFI
variable which indicates OVMF support for live migration.

iii) If the variable is set, issue a wrmsrl() to enable SEV live
migration support.

The EFI live migration check is done using a late_initcall() callback;
see the condensed sketch below.
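
A condensed sketch of the combined flow, using the identifiers added by
this patch and by earlier patches in this series (error handling and the
#ifdef guards elided, so this is not the literal patch code):

	if (sev_active() &&
	    kvm_para_has_feature(KVM_FEATURE_SEV_LIVE_MIGRATION)) {
		sev_live_mig_enabled = true;

		if (!efi_enabled(EFI_BOOT)) {
			/* Non-EFI boot: enable live migration right away. */
			wrmsrl(MSR_KVM_SEV_LIVE_MIG_EN,
			       KVM_SEV_LIVE_MIGRATION_ENABLED);
		}
		/*
		 * EFI boot: the late_initcall() reads the
		 * "SevLiveMigrationEnabled" UEFI variable via
		 * efi.get_variable() and, if it is set, issues the
		 * same wrmsrl().
		 */
	}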

Signed-off-by: Ashish Kalra <ashish.kalra@xxxxxxx>
---
 arch/x86/include/asm/mem_encrypt.h | 11 ++++++
 arch/x86/kernel/kvm.c              | 62 ++++++++++++++++++++++++++++++
 arch/x86/mm/mem_encrypt.c          | 11 ++++++
 3 files changed, 84 insertions(+)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 2f62bbdd9d12..83012af1660c 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -21,6 +21,7 @@
extern u64 sme_me_mask;
extern u64 sev_status;
extern bool sev_enabled;
+extern bool sev_live_mig_enabled;

void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
unsigned long decrypted_kernel_vaddr,
@@ -43,6 +44,8 @@ void __init sme_enable(struct boot_params *bp);

int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
+ bool enc);

void __init mem_encrypt_free_decrypted_mem(void);

@@ -59,6 +62,7 @@ bool sev_es_active(void);
#else /* !CONFIG_AMD_MEM_ENCRYPT */

#define sme_me_mask 0ULL
+#define sev_live_mig_enabled false

static inline void __init sme_early_encrypt(resource_size_t paddr,
unsigned long size) { }
@@ -82,6 +86,8 @@ static inline int __init
early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
static inline int __init
early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
+static inline void __init
+early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {}

static inline void mem_encrypt_free_decrypted_mem(void) { }

@@ -110,6 +116,11 @@ static inline u64 sme_get_me_mask(void)
return sme_me_mask;
}

+static inline bool sev_live_migration_enabled(void)
+{
+ return sev_live_mig_enabled;
+}
+
#endif /* __ASSEMBLY__ */

#endif /* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 7f57ede3cb8e..7da8b6b3528c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -26,6 +26,7 @@
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
+#include <linux/efi.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
@@ -429,6 +430,53 @@ static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
early_set_memory_decrypted((unsigned long) ptr, size);
}

+#ifdef CONFIG_EFI
+static int __init setup_kvm_sev_migration(void)
+{
+ efi_char16_t efi_Sev_Live_Mig_support_name[] = L"SevLiveMigrationEnabled";
+ efi_guid_t efi_variable_guid = MEM_ENCRYPT_GUID;
+ efi_status_t status;
+ unsigned long size;
+ bool enabled;
+
+ if (!sev_live_migration_enabled())
+ return false;
+
+ size = sizeof(enabled);
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_info("setup_kvm_sev_migration: no efi\n");
+ return false;
+ }
+
+ /* Get variable contents into buffer */
+ status = efi.get_variable(efi_Sev_Live_Mig_support_name,
+ &efi_variable_guid, NULL, &size, &enabled);
+
+ if (status == EFI_NOT_FOUND) {
+ pr_info("setup_kvm_sev_migration: variable not found\n");
+ return false;
+ }
+
+ if (status != EFI_SUCCESS) {
+ pr_info("setup_kvm_sev_migration: get_variable fail\n");
+ return false;
+ }
+
+ if (enabled == 0) {
+ pr_info("setup_kvm_sev_migration: live migration disabled in OVMF\n");
+ return false;
+ }
+
+ pr_info("setup_kvm_sev_migration: live migration enabled in OVMF\n");
+ wrmsrl(MSR_KVM_SEV_LIVE_MIG_EN, KVM_SEV_LIVE_MIGRATION_ENABLED);
+
+ return true;
+}
+
+late_initcall(setup_kvm_sev_migration);
+#endif
+
/*
* Iterate through all possible CPUs and map the memory region pointed
* by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
@@ -742,6 +790,20 @@ static void __init kvm_apic_init(void)

static void __init kvm_init_platform(void)
{
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ if (sev_active() &&
+ kvm_para_has_feature(KVM_FEATURE_SEV_LIVE_MIGRATION)) {
+ printk(KERN_INFO "KVM enable live migration\n");
+ sev_live_mig_enabled = true;
+ /*
+ * If not booted using EFI, enable Live migration support.
+ */
+ if (!efi_enabled(EFI_BOOT))
+ wrmsrl(MSR_KVM_SEV_LIVE_MIG_EN,
+ KVM_SEV_LIVE_MIGRATION_ENABLED);
+ } else {
+ printk(KERN_INFO "KVM enable live migration feature unsupported\n");
+ }
+#endif
kvmclock_init();
x86_platform.apic_post_init = kvm_apic_init;
}
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 9d1ac65050d0..cc1a4c762149 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -48,6 +48,8 @@ EXPORT_SYMBOL_GPL(sev_enable_key);

bool sev_enabled __section(".data");

+bool sev_live_mig_enabled __section(".data");
+
/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);

@@ -206,6 +208,9 @@ static void set_memory_enc_dec_hypercall(unsigned long vaddr, int npages,
unsigned long sz = npages << PAGE_SHIFT;
unsigned long vaddr_end, vaddr_next;

+ if (!sev_live_migration_enabled())
+ return;
+
vaddr_end = vaddr + sz;

for (; vaddr < vaddr_end; vaddr = vaddr_next) {
@@ -376,6 +381,12 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
return early_set_memory_enc_dec(vaddr, size, true);
}

+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
+ bool enc)
+{
+ set_memory_enc_dec_hypercall(vaddr, npages, enc);
+}
+
/*
* SME and SEV are very similar but they are not the same, so there are
* times that the kernel will need to distinguish between SME and SEV. The
--
2.17.1