[tip: x86/boot] x86/decompressor: Merge trampoline cleanup with switching code

From: tip-bot2 for Ard Biesheuvel
Date: Tue Aug 08 2023 - 12:36:47 EST


The following commit has been merged into the x86/boot branch of tip:

Commit-ID: 03dda95137d3247564854ad9032c0354273a159d
Gitweb: https://git.kernel.org/tip/03dda95137d3247564854ad9032c0354273a159d
Author: Ard Biesheuvel <ardb@xxxxxxxxxx>
AuthorDate: Mon, 07 Aug 2023 18:27:12 +02:00
Committer: Borislav Petkov (AMD) <bp@xxxxxxxxx>
CommitterDate: Mon, 07 Aug 2023 20:51:17 +02:00

x86/decompressor: Merge trampoline cleanup with switching code

Now that the trampoline setup code and the actual invocation of it are
all done from the C routine, the trampoline cleanup can be merged into
it as well, instead of returning to asm just to call another C function.

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
Signed-off-by: Borislav Petkov (AMD) <bp@xxxxxxxxx>
Acked-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/20230807162720.545787-16-ardb@xxxxxxxxxx
---
arch/x86/boot/compressed/head_64.S | 14 ++++----------
arch/x86/boot/compressed/pgtable_64.c | 18 ++++--------------
2 files changed, 8 insertions(+), 24 deletions(-)
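
A rough sketch of how the tail of configure_5level_paging() reads after this
change, pieced together from the hunks below (the paging-level detection and
trampoline setup earlier in the function are elided). The __native_read_cr3()
check from the old cleanup_trampoline() is gone, which makes sense given that
this code now only runs when the trampoline was actually used and CR3 therefore
already points at the trampoline page table:

asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
{
        ...                             /* detect and set up paging levels */

        toggle_la57(trampoline_32bit);  /* switch levels via the 32-bit trampoline */

        /* Move the top level page table out of trampoline memory. */
        memcpy(pgtable, trampoline_32bit, PAGE_SIZE);
        native_write_cr3((unsigned long)pgtable);

        /* Restore trampoline memory */
        memcpy(trampoline_32bit, trampoline_save, TRAMPOLINE_32BIT_SIZE);
}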

diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index afdaf8c..fb0e562 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -430,20 +430,14 @@ SYM_CODE_START(startup_64)
* a trampoline in 32-bit addressable memory if the current number does
* not match the desired number.
*
- * Pass the boot_params pointer as the first argument.
+ * Pass the boot_params pointer as the first argument. The second
+ * argument is the relocated address of the page table to use instead
+ * of the page table in trampoline memory (if required).
*/
movq %r15, %rdi
+ leaq rva(top_pgtable)(%rbx), %rsi
call configure_5level_paging

- /*
- * cleanup_trampoline() would restore trampoline memory.
- *
- * RDI is address of the page table to use instead of page table
- * in trampoline memory (if required).
- */
- leaq rva(top_pgtable)(%rbx), %rdi
- call cleanup_trampoline
-
/* Zero EFLAGS */
pushq $0
popfq
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index eab4e6b..7939eb6 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -101,7 +101,7 @@ static unsigned long find_trampoline_placement(void)
return bios_start - TRAMPOLINE_32BIT_SIZE;
}

-asmlinkage void configure_5level_paging(struct boot_params *bp)
+asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
{
void (*toggle_la57)(void *cr3);
bool l5_required = false;
@@ -191,22 +191,12 @@ asmlinkage void configure_5level_paging(struct boot_params *bp)
}

toggle_la57(trampoline_32bit);
-}
-
-void cleanup_trampoline(void *pgtable)
-{
- void *trampoline_pgtable;
-
- trampoline_pgtable = trampoline_32bit;

/*
- * Move the top level page table out of trampoline memory,
- * if it's there.
+ * Move the top level page table out of trampoline memory.
*/
- if ((void *)__native_read_cr3() == trampoline_pgtable) {
- memcpy(pgtable, trampoline_pgtable, PAGE_SIZE);
- native_write_cr3((unsigned long)pgtable);
- }
+ memcpy(pgtable, trampoline_32bit, PAGE_SIZE);
+ native_write_cr3((unsigned long)pgtable);

/* Restore trampoline memory */
memcpy(trampoline_32bit, trampoline_save, TRAMPOLINE_32BIT_SIZE);
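
(For reference: in the head_64.S hunk above, the two arguments travel in %rdi
and %rsi per the x86-64 calling convention, matching the updated prototype

        asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable);

i.e. %rdi carries the boot_params pointer and %rsi the relocated address of
top_pgtable computed by the leaq.)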