[PATCH 7/9] KVM: arm64: Fix constant-pool users in hyp

From: David Brazdil
Date: Wed Dec 09 2020 - 08:19:56 EST


Hyp code uses absolute addressing to obtain the kimg VA of a small number
of kernel symbols. Since the kernel now converts constant-pool addresses
to hyp VAs, this trick no longer works.

Change the helpers to convert from hyp VA back to kimg VA or PA, as
needed, and rework the callers accordingly.
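
For reference, the address arithmetic the reworked helpers perform is
sketched below (illustrative C, not part of the patch; it relies on the
kernel's existing hyp_physvirt_offset and kimage_voffset variables, and
the local variable names are made up):

	/* hyp_pa: hyp_physvirt_offset holds PA - hyp VA */
	phys_addr_t pa = hyp_va + hyp_physvirt_offset;

	/* hyp_kimg_va: kimage_voffset holds kimg VA - PA */
	unsigned long kimg_va = pa + kimage_voffset;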

Signed-off-by: David Brazdil <dbrazdil@xxxxxxxxxx>
---
arch/arm64/include/asm/kvm_mmu.h | 42 ++++++++++++------------------
arch/arm64/kvm/hyp/nvhe/host.S | 29 +++++++++++----------
arch/arm64/kvm/hyp/nvhe/hyp-init.S | 2 --
3 files changed, 31 insertions(+), 42 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6bbb44011c84..adadc468cc71 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -73,49 +73,39 @@ alternative_cb_end
.endm

/*
- * Convert a kernel image address to a PA
- * reg: kernel address to be converted in place
+ * Convert a hypervisor VA to a PA
+ * reg: hypervisor address to be converted in place
* tmp: temporary register
- *
- * The actual code generation takes place in kvm_get_kimage_voffset, and
- * the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_get_kimage_voffset uses the
- * specific registers encoded in the instructions).
*/
-.macro kimg_pa reg, tmp
-alternative_cb kvm_get_kimage_voffset
- movz \tmp, #0
- movk \tmp, #0, lsl #16
- movk \tmp, #0, lsl #32
- movk \tmp, #0, lsl #48
-alternative_cb_end
-
- /* reg = __pa(reg) */
- sub \reg, \reg, \tmp
+.macro hyp_pa reg, tmp
+ ldr_l \tmp, hyp_physvirt_offset
+ add \reg, \reg, \tmp
.endm

/*
- * Convert a kernel image address to a hyp VA
- * reg: kernel address to be converted in place
+ * Convert a hypervisor VA to a kernel image address
+ * reg: hypervisor address to be converted in place
* tmp: temporary register
*
* The actual code generation takes place in kvm_get_kimage_voffset, and
* the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_update_kimg_phys_offset uses the
+ * perform the register allocation (kvm_get_kimage_voffset uses the
* specific registers encoded in the instructions).
*/
-.macro kimg_hyp_va reg, tmp
-alternative_cb kvm_update_kimg_phys_offset
+.macro hyp_kimg_va reg, tmp
+ /* Convert hyp VA -> PA. */
+ hyp_pa \reg, \tmp
+
+ /* Load kimage_voffset. */
+alternative_cb kvm_get_kimage_voffset
movz \tmp, #0
movk \tmp, #0, lsl #16
movk \tmp, #0, lsl #32
movk \tmp, #0, lsl #48
alternative_cb_end

- sub \reg, \reg, \tmp
- mov_q \tmp, PAGE_OFFSET
- orr \reg, \reg, \tmp
- kern_hyp_va \reg
+ /* Convert PA -> kimg VA. */
+ add \reg, \reg, \tmp
.endm

#else
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index a820dfdc9c25..6585a7cbbc56 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -74,27 +74,28 @@ SYM_FUNC_END(__host_enter)
* void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
*/
SYM_FUNC_START(__hyp_do_panic)
- /* Load the format arguments into x1-7 */
- mov x6, x3
- get_vcpu_ptr x7, x3
-
- mrs x3, esr_el2
- mrs x4, far_el2
- mrs x5, hpfar_el2
-
/* Prepare and exit to the host's panic function. */
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
msr spsr_el2, lr
ldr lr, =panic
+ hyp_kimg_va lr, x6
msr elr_el2, lr

- /*
- * Set the panic format string and enter the host, conditionally
- * restoring the host context.
- */
+ /* Set the panic format string. Use the now-free LR as scratch. */
+ ldr lr, =__hyp_panic_string
+ hyp_kimg_va lr, x6
+
+ /* Load the format arguments into x1-7. */
+ mov x6, x3
+ get_vcpu_ptr x7, x3
+ mrs x3, esr_el2
+ mrs x4, far_el2
+ mrs x5, hpfar_el2
+
+ /* Enter the host, conditionally restoring the host context. */
cmp x0, xzr
- ldr x0, =__hyp_panic_string
+ mov x0, lr
b.eq __host_enter_without_restoring
b __host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)
@@ -124,7 +125,7 @@ SYM_FUNC_END(__hyp_do_panic)
* Preserve x0-x4, which may contain stub parameters.
*/
ldr x5, =__kvm_handle_stub_hvc
- kimg_pa x5, x6
+ hyp_pa x5, x6
br x5
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 68fd64f2313e..99b408fe09ee 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -139,7 +139,6 @@ alternative_else_nop_endif

/* Set the host vector */
ldr x0, =__kvm_hyp_host_vector
- kimg_hyp_va x0, x1
msr vbar_el2, x0

ret
@@ -198,7 +197,6 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
/* Leave idmap. */
mov x0, x29
ldr x1, =kvm_host_psci_cpu_entry
- kimg_hyp_va x1, x2
br x1
SYM_CODE_END(__kvm_hyp_init_cpu)

--
2.29.2.576.ga3fc446d84-goog