[PATCH RFC 08/43] x86/boot/64: Adapt assembly for PIE support

From: Hou Wenlong
Date: Fri Apr 28 2023 - 05:52:51 EST


From: Thomas Garnier <thgarnie@xxxxxxxxxxxx>

Change the assembly code to use absolute references for transitions
between address spaces and relative references when accessing global
variables within the same address space. This ensures that a kernel
built with PIE references the correct addresses based on context.
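
For illustration, the two reference styles in question, as they appear
in the hunks below: a global in the same address space is reached
relative to the current instruction pointer, while a cross-address-space
constant such as a physical offset needs a full 64-bit absolute
immediate, since a 32-bit sign-extended absolute relocation would be
rejected when linking PIE:

	/* relative: works wherever the kernel happens to run */
	movq	sme_me_mask(%rip), %rax

	/* absolute: 64-bit immediate instead of addq's imm32 */
	movabs	$(early_top_pgt - __START_KERNEL_map), %rcx
	addq	%rcx, %rax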

[Hou Wenlong: Adapt to the new assembly code and remove the change for
initial_code(%rip)]

Signed-off-by: Thomas Garnier <thgarnie@xxxxxxxxxxxx>
Co-developed-by: Hou Wenlong <houwenlong.hwl@xxxxxxxxxxxx>
Signed-off-by: Hou Wenlong <houwenlong.hwl@xxxxxxxxxxxx>
Cc: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxxxx>
---
arch/x86/kernel/head_64.S | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
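
Two of the patterns below, annotated (not part of the commit message):

The per-cpu lookup splits one absolute-addressed load into a
RIP-relative leaq plus an indexed load, because the original form
encodes the absolute address of __per_cpu_offset in a 32-bit
displacement that a PIE link would reject:

	leaq	__per_cpu_offset(%rip), %rdx	/* &__per_cpu_offset[0] */
	movq	(%rdx,%rcx,8), %rdx		/* __per_cpu_offset[cpu] */

Similarly, pushq only accepts a sign-extended 32-bit immediate, so the
return address pushed for the unwinder goes through a 64-bit movabs
first:

	movabs	$.Lafter_lret, %rax
	pushq	%rax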

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index a5df3e994f04..21f0556d3ac0 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -114,7 +114,8 @@ SYM_CODE_START_NOALIGN(startup_64)
popq %rsi

/* Form the CR3 value being sure to include the CR3 modifier */
- addq $(early_top_pgt - __START_KERNEL_map), %rax
+ movabs $(early_top_pgt - __START_KERNEL_map), %rcx
+ addq %rcx, %rax
jmp 1f
SYM_CODE_END(startup_64)

@@ -156,13 +157,14 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
* added to the initial pgdir entry that will be programmed into CR3.
*/
#ifdef CONFIG_AMD_MEM_ENCRYPT
- movq sme_me_mask, %rax
+ movq sme_me_mask(%rip), %rax
#else
xorq %rax, %rax
#endif

/* Form the CR3 value being sure to include the CR3 modifier */
- addq $(init_top_pgt - __START_KERNEL_map), %rax
+ movabs $(init_top_pgt - __START_KERNEL_map), %rcx
+ addq %rcx, %rax
1:

#ifdef CONFIG_X86_MCE
@@ -226,7 +228,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
movq %rax, %cr4

/* Ensure I am executing from virtual addresses */
- movq $1f, %rax
+ movabs $1f, %rax
ANNOTATE_RETPOLINE_SAFE
jmp *%rax
1:
@@ -237,7 +239,8 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
movl smpboot_control(%rip), %ecx

/* Get the per cpu offset for the given CPU# which is in ECX */
- movq __per_cpu_offset(,%rcx,8), %rdx
+ leaq __per_cpu_offset(%rip), %rdx
+ movq (%rdx,%rcx,8), %rdx
#else
xorl %edx, %edx /* zero-extended to clear all of RDX */
#endif /* CONFIG_SMP */
@@ -248,7 +251,8 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
*
* RDX contains the per-cpu offset
*/
- movq pcpu_hot + X86_current_task(%rdx), %rax
+ leaq (pcpu_hot + X86_current_task)(%rip), %rax
+ movq (%rdx,%rax,1), %rax
movq TASK_threadsp(%rax), %rsp

/*
@@ -259,7 +263,8 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
*/
subq $16, %rsp
movw $(GDT_SIZE-1), (%rsp)
- leaq gdt_page(%rdx), %rax
+ leaq gdt_page(%rip), %rax
+ addq %rdx, %rax
movq %rax, 2(%rsp)
lgdt (%rsp)
addq $16, %rsp
@@ -362,7 +367,8 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
* REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
* address given in m16:64.
*/
- pushq $.Lafter_lret # put return address on stack for unwinder
+ movabs $.Lafter_lret, %rax
+ pushq %rax # put return address on stack for unwinder
xorl %ebp, %ebp # clear frame pointer
movq initial_code(%rip), %rax
pushq $__KERNEL_CS # set correct cs
--
2.31.1