[PATCH 08/14] KVM: arm64: Protect page table traversal with RCU

From: Oliver Upton
Date: Tue Aug 30 2022 - 15:42:57 EST


The use of RCU is necessary to safely change the paging structures in
parallel. Acquire and release the RCU read lock when traversing the
page tables.

Signed-off-by: Oliver Upton <oliver.upton@xxxxxxxxx>
---
 arch/arm64/include/asm/kvm_pgtable.h | 19 ++++++++++++++++++-
 arch/arm64/kvm/hyp/pgtable.c         |  7 ++++++-
 2 files changed, 24 insertions(+), 2 deletions(-)
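
A note for reviewers (not part of the commit message): below is a minimal
sketch of the read-side pattern this patch establishes. The helper name
stage2_peek_pte() is made up for illustration; only kvm_pgtable_walk_begin(),
kvm_pgtable_walk_end() and kvm_pte_read() come from this patch. Any PTE
access outside of kvm_pgtable_walk() would need the same bracketing so that
the rcu_dereference() inside kvm_pte_read() runs under the read lock (or its
no-op equivalent at EL2):

static kvm_pte_t stage2_peek_pte(kvm_pte_t *ptep)
{
	kvm_pte_t pte;

	kvm_pgtable_walk_begin();	/* rcu_read_lock(), no-op in the nVHE hyp */
	pte = kvm_pte_read(ptep);	/* READ_ONCE() through kvm_dereference_ptep() */
	kvm_pgtable_walk_end();		/* rcu_read_unlock(), no-op in the nVHE hyp */

	return pte;
}

This is the same bracketing kvm_pgtable_walk() now does internally, see the
pgtable.c hunk below.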

diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 78fbb7be1af6..7d2de0a98ccb 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -578,9 +578,26 @@ enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);
  */
 enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);
 
+#if defined(__KVM_NVHE_HYPERVISOR__)
+
+static inline void kvm_pgtable_walk_begin(void) {}
+static inline void kvm_pgtable_walk_end(void) {}
+
+#define kvm_dereference_ptep	rcu_dereference_raw
+
+#else	/* !defined(__KVM_NVHE_HYPERVISOR__) */
+
+#define kvm_pgtable_walk_begin	rcu_read_lock
+#define kvm_pgtable_walk_end	rcu_read_unlock
+#define kvm_dereference_ptep	rcu_dereference
+
+#endif	/* defined(__KVM_NVHE_HYPERVISOR__) */
+
 static inline kvm_pte_t kvm_pte_read(kvm_pte_t *ptep)
 {
-	return READ_ONCE(*ptep);
+	kvm_pte_t __rcu *p = (kvm_pte_t __rcu *)ptep;
+
+	return READ_ONCE(*kvm_dereference_ptep(p));
 }
 
 #endif	/* __ARM64_KVM_PGTABLE_H__ */
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index f911509e6512..215a14c434ed 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -284,8 +284,13 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
 		.end	= PAGE_ALIGN(walk_data.addr + size),
 		.walker	= walker,
 	};
+	int r;
 
-	return _kvm_pgtable_walk(&walk_data);
+	kvm_pgtable_walk_begin();
+	r = _kvm_pgtable_walk(&walk_data);
+	kvm_pgtable_walk_end();
+
+	return r;
 }
 
 struct leaf_walk_data {
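
(Again not part of the patch proper: an illustrative, non-literal view of what
kvm_pte_read() resolves to under each branch of the new #ifdef; kernel_read()
and nvhe_read() are made-up names. In the kernel proper, rcu_dereference()
gets the usual sparse/lockdep checking against the read lock taken by
kvm_pgtable_walk_begin(); the nVHE hypervisor at EL2 has no RCU, which is
presumably why the begin/end helpers are no-ops there and the dereference is
the unchecked variant.)

static kvm_pte_t kernel_read(kvm_pte_t __rcu *p)
{
	/* kernel proper: checked dereference, expects rcu_read_lock() held */
	return READ_ONCE(*rcu_dereference(p));
}

static kvm_pte_t nvhe_read(kvm_pte_t __rcu *p)
{
	/* nVHE hyp at EL2: no RCU there, so the unchecked variant is used */
	return READ_ONCE(*rcu_dereference_raw(p));
}
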
--
2.37.2.672.g94769d06f0-goog