[RFC PATCH v6.10 4/4] x86/boot/64: Avoid intentional absolute symbol references in .head.text

From: Ard Biesheuvel
Date: Thu Mar 07 2024 - 09:32:09 EST


From: Ard Biesheuvel <ardb@xxxxxxxxxx>

The code in .head.text executes from a 1:1 mapping and cannot generally
refer to global variables using their kernel virtual addresses. However,
some such references are valid and intentional: the kernel virtual
addresses of _text and _end are needed to populate the page tables
correctly, and some other section markers are used in a similar way.
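
As an illustration (not part of this patch, and with hypothetical example
values): an absolute reference evaluates to the link-time kernel virtual
address, which must not be dereferenced while still executing from the
1:1 mapping, whereas a RIP-relative reference resolves to the address at
which the symbol is actually reachable from the running code, i.e. its
1:1 mapped physical address:

	unsigned long va = (unsigned long)_text;		/* absolute: kernel VA, e.g. 0xffffffff81000000 */
	unsigned long pa = (unsigned long)&RIP_REL_REF(_text);	/* RIP-relative: 1:1 mapped PA, e.g. 0x1000000 */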

To avoid the need for making exceptions to the rule that .head.text must
not contain any absolute symbol references, derive these virtual addresses
from the 1:1 mapped physical addresses, which can be determined safely in
a RIP-relative manner using RIP_REL_REF(), combined with the va_offset
argument that __startup_64() already receives.
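
For reference, RIP_REL_REF() works by forcing the address computation to
be emitted as a RIP-relative LEA, so taking the address of the wrapped
symbol yields the address it is reachable at from the currently executing
code. A sketch of how such a macro can be constructed is given below; the
in-tree definition lives in arch/x86/include/asm/asm.h and may differ in
detail:

	/* Sketch only - see arch/x86/include/asm/asm.h for the real thing */
	static __always_inline __pure void *rip_rel_ptr(void *p)
	{
		/* rederive the pointer via a RIP-relative LEA */
		asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));

		return p;
	}

	#define RIP_REL_REF(var)	(*(typeof(&(var)))rip_rel_ptr(&(var)))

With that, &RIP_REL_REF(_end) evaluates to the 1:1 mapped address of
_end, and subtracting va_offset produces the kernel virtual address,
which is what the hunks below rely on.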

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
---
arch/x86/kernel/head64.c | 30 ++++++++++++--------
1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 8fd80cf07691..ce1a77e26ce3 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -84,9 +84,11 @@ static inline bool check_la57_support(void)
 	return true;
 }
 
-static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdval_t *pmd)
+static unsigned long __head sme_postprocess_startup(struct boot_params *bp,
+						     pmdval_t *pmd,
+						     unsigned long va_offset)
 {
-	unsigned long vaddr, vaddr_end;
+	unsigned long paddr, paddr_end;
 	int i;
 
 	/* Encrypt the kernel and related (if SME is active) */
@@ -99,10 +101,10 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv
 	 * attribute.
 	 */
 	if (sme_get_me_mask()) {
-		vaddr = (unsigned long)__start_bss_decrypted;
-		vaddr_end = (unsigned long)__end_bss_decrypted;
+		paddr = (unsigned long)&RIP_REL_REF(__start_bss_decrypted);
+		paddr_end = (unsigned long)&RIP_REL_REF(__end_bss_decrypted);
 
-		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
+		for (; paddr < paddr_end; paddr += PMD_SIZE) {
 			/*
 			 * On SNP, transition the page to shared in the RMP table so that
 			 * it is consistent with the page table attribute change.
@@ -111,11 +113,11 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv
 			 * mapping (kernel .text). PVALIDATE, by way of
 			 * early_snp_set_memory_shared(), requires a valid virtual
 			 * address but the kernel is currently running off of the identity
-			 * mapping so use __pa() to get a *currently* valid virtual address.
+			 * mapping so use the PA to get a *currently* valid virtual address.
 			 */
-			early_snp_set_memory_shared(__pa(vaddr), __pa(vaddr), PTRS_PER_PMD);
+			early_snp_set_memory_shared(paddr, paddr, PTRS_PER_PMD);
 
-			i = pmd_index(vaddr);
+			i = pmd_index(paddr - va_offset);
 			pmd[i] -= sme_get_me_mask();
 		}
 	}
@@ -139,6 +141,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
 				  unsigned long va_offset)
 {
 	pmd_t (*early_pgts)[PTRS_PER_PMD] = RIP_REL_REF(early_dynamic_pgts);
+	unsigned long va_text, va_end;
 	unsigned long pgtable_flags;
 	unsigned long load_delta;
 	pgdval_t *pgd;
@@ -165,6 +168,9 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	if (load_delta & ~PMD_MASK)
 		for (;;);
 
+	va_text = physaddr - va_offset;
+	va_end = (unsigned long)&RIP_REL_REF(_end) - va_offset;
+
 	/* Include the SME encryption mask in the fixup value */
 	load_delta += sme_get_me_mask();

@@ -225,7 +231,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	pmd_entry += sme_get_me_mask();
 	pmd_entry += physaddr;
 
-	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
+	for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) {
 		int idx = i + (physaddr >> PMD_SHIFT);
 
 		pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
@@ -250,11 +256,11 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	pmd = &RIP_REL_REF(level2_kernel_pgt)->pmd;
 
 	/* invalidate pages before the kernel image */
-	for (i = 0; i < pmd_index((unsigned long)_text); i++)
+	for (i = 0; i < pmd_index(va_text); i++)
 		pmd[i] &= ~_PAGE_PRESENT;
 
 	/* fixup pages that are part of the kernel image */
-	for (; i <= pmd_index((unsigned long)_end); i++)
+	for (; i <= pmd_index(va_end); i++)
 		if (pmd[i] & _PAGE_PRESENT)
 			pmd[i] += load_delta;

@@ -262,7 +268,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	for (; i < PTRS_PER_PMD; i++)
 		pmd[i] &= ~_PAGE_PRESENT;
 
-	return sme_postprocess_startup(bp, pmd);
+	return sme_postprocess_startup(bp, pmd, va_offset);
 }
 
 /* Wipe all early page tables except for the kernel symbol map */
--
2.44.0.278.ge034bb2e1d-goog