[patch 13/17] init, x86: Move mem_encrypt_init() into arch_cpu_finalize_init()

From: Thomas Gleixner
Date: Tue Jun 13 2023 - 19:41:15 EST


Invoke the X86ism mem_encrypt_init() from X86 arch_cpu_finalize_init() and
remove the weak fallback from the core code.

No functional change.
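
For reference, the pattern being removed is the generic __weak fallback:
the core code calls the function unconditionally and provides an empty
weak default, which an architecture overrides by supplying a strong
definition of the same symbol. A minimal sketch of that pattern, using a
hypothetical foo_init() rather than the real symbols:

	/* Core code: empty weak default, used when no arch overrides it */
	void __init __weak foo_init(void) { }

	/* Arch code: strong definition, the linker picks this one */
	void __init foo_init(void)
	{
		/* arch specific setup goes here */
	}

After this patch the x86 header provides the declaration (and a static
inline stub for !CONFIG_AMD_MEM_ENCRYPT) instead, and the call site
moves from start_kernel() into arch_cpu_finalize_init().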

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Tom Lendacky <thomas.lendacky@xxxxxxx>
---
 arch/x86/include/asm/mem_encrypt.h |    7 ++++---
 arch/x86/kernel/cpu/common.c       |   11 +++++++++++
 init/main.c                        |   13 -------------
 3 files changed, 15 insertions(+), 16 deletions(-)

--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -51,6 +51,8 @@ void __init mem_encrypt_free_decrypted_m

 void __init sev_es_init_vc_handling(void);
 
+void __init mem_encrypt_init(void);
+
 #define __bss_decrypted __section(".bss..decrypted")
 
 #else /* !CONFIG_AMD_MEM_ENCRYPT */
@@ -83,13 +85,12 @@ early_set_mem_enc_dec_hypercall(unsigned
 
 static inline void mem_encrypt_free_decrypted_mem(void) { }
 
+static inline void mem_encrypt_init(void) { }
+
 #define __bss_decrypted
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
-/* Architecture __weak replacement functions */
-void __init mem_encrypt_init(void);
-
 void add_encrypt_protection_map(void);
 
 /*
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/kgdb.h>
+#include <linux/mem_encrypt.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/io.h>
@@ -2417,4 +2418,14 @@ void __init arch_cpu_finalize_init(void)
 	} else {
 		fpu__init_check_bugs();
 	}
+
+	/*
+	 * This needs to be called before any devices perform DMA
+	 * operations that might use the SWIOTLB bounce buffers. It will
+	 * mark the bounce buffers as decrypted so that their usage will
+	 * not cause "plain-text" data to be decrypted when accessed. It
+	 * must be called after late_time_init() so that Hyper-V x86/x64
+	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
+	 */
+	mem_encrypt_init();
 }
--- a/init/main.c
+++ b/init/main.c
@@ -95,7 +95,6 @@
 #include <linux/cache.h>
 #include <linux/rodata_test.h>
 #include <linux/jump_label.h>
-#include <linux/mem_encrypt.h>
 #include <linux/kcsan.h>
 #include <linux/init_syscalls.h>
 #include <linux/stackdepot.h>
@@ -786,8 +785,6 @@ void __init __weak thread_stack_cache_in
 }
 #endif
 
-void __init __weak mem_encrypt_init(void) { }
-
 void __init __weak poking_init(void) { }
 
 void __init __weak pgtable_cache_init(void) { }
@@ -1043,16 +1040,6 @@ asmlinkage __visible void __init __no_sa

 	arch_cpu_finalize_init();
 
-	/*
-	 * This needs to be called before any devices perform DMA
-	 * operations that might use the SWIOTLB bounce buffers. It will
-	 * mark the bounce buffers as decrypted so that their usage will
-	 * not cause "plain-text" data to be decrypted when accessed. It
-	 * must be called after late_time_init() so that Hyper-V x86/x64
-	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
-	 */
-	mem_encrypt_init();
-
 	pid_idr_init();
 	anon_vma_init();
 #ifdef CONFIG_X86