[PATCH 29/29] arm64/mm: attempt speculative mm faults first

From: Michel Lespinasse
Date: Fri Apr 30 2021 - 15:53:55 EST


Attempt speculative mm fault handling first, and fall back to the
existing (non-speculative) code if that fails.

This follows the lines of the x86 speculative fault handling code,
with some minor arch differences such as the way the
VM_FAULT_BADACCESS case is handled: an access error aborts the
speculative attempt and falls back to the non-speculative path
rather than being reported from the speculative path itself.
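
The speculative path is built on an mmap sequence count: the vma is
copied under RCU protection, the copy is validated against the
sequence count, and any concurrent change to the address space
aborts the attempt back to the mmap-lock path. As a rough
illustration only, here is a minimal userspace sketch of that
optimistic read-and-validate pattern; the names and types below are
invented for the example and are not the kernel's API:

	/* Toy model of the read-side seqcount protocol used here. */
	#include <stdatomic.h>
	#include <stdbool.h>

	struct map_state {
		atomic_ulong seq;	/* odd while a writer is active */
		atomic_ulong data;	/* stands in for the vma tree */
	};

	static unsigned long read_start(struct map_state *m)
	{
		/* acquire pairs with the writer's final release store */
		return atomic_load_explicit(&m->seq, memory_order_acquire);
	}

	static bool read_check(struct map_state *m, unsigned long seq)
	{
		/* keep the data snapshot ordered before the re-read */
		atomic_thread_fence(memory_order_acquire);
		return atomic_load_explicit(&m->seq,
					    memory_order_relaxed) == seq;
	}

	static bool speculative_read(struct map_state *m, unsigned long *out)
	{
		unsigned long seq = read_start(m);

		if (seq & 1)	/* writer in progress: give up early */
			return false;
		/* snapshot, as the patch does with pvma = *vma */
		*out = atomic_load_explicit(&m->data, memory_order_relaxed);
		/* false: a writer raced us, caller must take the lock */
		return read_check(m, seq);
	}

A caller that sees speculative_read() fail retries under the real
lock, which is exactly how do_page_fault() below falls back to the
mmap_read_lock() path when the speculative attempt aborts or the
fault returns VM_FAULT_RETRY.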

Signed-off-by: Michel Lespinasse <michel@xxxxxxxxxxxxxx>
---
 arch/arm64/mm/fault.c | 63 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index f37d4e3830b7..f2d09f8e4bc2 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -25,6 +25,7 @@
 #include <linux/perf_event.h>
 #include <linux/preempt.h>
 #include <linux/hugetlb.h>
+#include <linux/vm_event_item.h>
 
 #include <asm/acpi.h>
 #include <asm/bug.h>
@@ -530,6 +531,11 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
 	unsigned long vm_flags = VM_ACCESS_FLAGS;
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 	unsigned long addr = untagged_addr(far);
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	struct vm_area_struct *vma;
+	struct vm_area_struct pvma;
+	unsigned long seq;
+#endif
 
 	if (kprobe_page_fault(regs, esr))
 		return 0;
@@ -564,6 +570,60 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+
+	/*
+	 * No need to try speculative faults for kernel or
+	 * single threaded user space.
+	 */
+	if (!(mm_flags & FAULT_FLAG_USER) || atomic_read(&mm->mm_users) == 1)
+		goto no_spf;
+
+	count_vm_event(SPF_ATTEMPT);
+	seq = mmap_seq_read_start(mm);
+	if (seq & 1) {
+		count_vm_spf_event(SPF_ABORT_ODD);
+		goto spf_abort;
+	}
+	rcu_read_lock();
+	vma = find_vma(mm, addr);
+	if (!vma || vma->vm_start > addr) {
+		rcu_read_unlock();
+		count_vm_spf_event(SPF_ABORT_UNMAPPED);
+		goto spf_abort;
+	}
+	if (!vma_is_anonymous(vma)) {
+		rcu_read_unlock();
+		count_vm_spf_event(SPF_ABORT_NO_SPECULATE);
+		goto spf_abort;
+	}
+	pvma = *vma;
+	rcu_read_unlock();
+	if (!mmap_seq_read_check(mm, seq, SPF_ABORT_VMA_COPY))
+		goto spf_abort;
+	vma = &pvma;
+	if (!(vma->vm_flags & vm_flags)) {
+		count_vm_spf_event(SPF_ABORT_ACCESS_ERROR);
+		goto spf_abort;
+	}
+	fault = do_handle_mm_fault(vma, addr & PAGE_MASK,
+				   mm_flags | FAULT_FLAG_SPECULATIVE, seq, regs);
+
+	/* Quick path to respond to signals */
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
+		return 0;
+	}
+	if (!(fault & VM_FAULT_RETRY))
+		goto done;
+
+spf_abort:
+	count_vm_event(SPF_ABORT);
+no_spf:
+
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
@@ -604,6 +664,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
 		}
 	}
 	mmap_read_unlock(mm);
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+done:
+#endif
 
 	/*
 	 * Handle the "normal" (no error) case first.
--
2.20.1