[PATCH v12 21/31] mm: Introduce find_vma_rcu()

From: Laurent Dufour
Date: Tue Apr 16 2019 - 09:46:51 EST


This allows searching for a VMA structure without holding the mmap_sem.

The search is repeated as long as the mm seqlock is changing, until a valid
VMA is found.

While under RCU protection, a reference is taken on the VMA, so the caller
must call put_vma() once it no longer needs the VMA structure.
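
For illustration, here is a minimal sketch of the expected calling pattern;
the surrounding context (mm, addr and what is done with the VMA) is
hypothetical:

	struct vm_area_struct *vma;

	vma = find_vma_rcu(mm, addr);
	if (vma && vma->vm_start <= addr) {
		/*
		 * Like find_vma(), this returns the first VMA with
		 * vm_end > addr, so vm_start must be checked before
		 * assuming that the VMA covers addr.
		 */
		/* ... speculative processing of the VMA ... */
	}
	if (vma)
		/* Drop the reference taken by find_vma_rcu(). */
		put_vma(vma);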

At the time a VMA is inserted in the MM RB tree, in vma_rb_insert(), a
reference is taken on the VMA by calling get_vma().

When removing a VMA from the MM RB tree, the VMA is not released immediately
but at the end of the RCU grace period, through vm_rcu_put(). This ensures
that the VMA remains allocated until the end of the RCU grace period.
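
To illustrate why the release must be deferred, consider an RCU walker
racing with an unmap (a schematic interleaving, not code from this series):

	CPU 0 (speculative walker)	CPU 1 (unmap path)
	rcu_read_lock();
	vma = find_vma(mm, addr);
					__vma_rb_erase(vma, mm);
					  -> vm_rcu_put(vma);
	get_vma(vma);
	rcu_read_unlock();

	/*
	 * get_vma() on CPU 0 is safe because __vm_rcu_put(), and thus
	 * the final put_vma(), cannot run before CPU 0 leaves its RCU
	 * read-side critical section.
	 */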

Since the vm_file pointer, if valid, is released in put_vma(), there is no
guarantee that the file pointer will still be valid for the returned VMA.
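
A caller must therefore not rely on vma->vm_file; a hypothetical defensive
pattern for a caller that can only handle anonymous VMAs:

	vma = find_vma_rcu(mm, addr);
	if (vma && vma->vm_file) {
		/*
		 * The file may already have been released; give up and
		 * let the caller fall back to the mmap_sem protected
		 * path.
		 */
		put_vma(vma);
		vma = NULL;
	}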

Signed-off-by: Laurent Dufour <ldufour@xxxxxxxxxxxxx>
---
 include/linux/mm_types.h |  1 +
 mm/internal.h            |  5 ++-
 mm/mmap.c                | 76 ++++++++++++++++++++++++++++++++++++++--
 3 files changed, 78 insertions(+), 4 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6a6159e11a3f..9af6694cb95d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -287,6 +287,7 @@ struct vm_area_struct {
 
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
 	atomic_t vm_ref_count;
+	struct rcu_head vm_rcu;
 #endif
 	struct rb_node vm_rb;

diff --git a/mm/internal.h b/mm/internal.h
index 302382bed406..1e368e4afe3c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -55,7 +55,10 @@ static inline void put_vma(struct vm_area_struct *vma)
 		__free_vma(vma);
 }
 
-#else
+extern struct vm_area_struct *find_vma_rcu(struct mm_struct *mm,
+					   unsigned long addr);
+
+#else /* CONFIG_SPECULATIVE_PAGE_FAULT */
 
 static inline void get_vma(struct vm_area_struct *vma)
 {
diff --git a/mm/mmap.c b/mm/mmap.c
index c106440dcae7..34bf261dc2c8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -179,6 +179,18 @@ static inline void mm_write_sequnlock(struct mm_struct *mm)
 {
 	write_sequnlock(&mm->mm_seq);
 }
+
+static void __vm_rcu_put(struct rcu_head *head)
+{
+	struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
+						  vm_rcu);
+	put_vma(vma);
+}
+static void vm_rcu_put(struct vm_area_struct *vma)
+{
+	VM_BUG_ON_VMA(!RB_EMPTY_NODE(&vma->vm_rb), vma);
+	call_rcu(&vma->vm_rcu, __vm_rcu_put);
+}
 #else
 static inline void mm_write_seqlock(struct mm_struct *mm)
 {
@@ -190,6 +202,8 @@ static inline void mm_write_sequnlock(struct mm_struct *mm)
 
 void __free_vma(struct vm_area_struct *vma)
 {
+	if (IS_ENABLED(CONFIG_SPECULATIVE_PAGE_FAULT))
+		VM_BUG_ON_VMA(!RB_EMPTY_NODE(&vma->vm_rb), vma);
 	mpol_put(vma_policy(vma));
 	vm_area_free(vma);
 }
@@ -197,11 +211,24 @@ void __free_vma(struct vm_area_struct *vma)
 /*
  * Close a vm structure and free it, returning the next.
  */
-static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+static struct vm_area_struct *__remove_vma(struct vm_area_struct *vma)
 {
 	struct vm_area_struct *next = vma->vm_next;
 
 	might_sleep();
+	if (IS_ENABLED(CONFIG_SPECULATIVE_PAGE_FAULT) &&
+	    !RB_EMPTY_NODE(&vma->vm_rb)) {
+		/*
+		 * If the VMA is still linked in the RB tree, we must release
+		 * the tree's reference by calling put_vma().
+		 * This should only happen when called from exit_mmap().
+		 * We forcibly clear the node to satisfy the check in
+		 * __free_vma(). This is safe since the RB tree is not walked
+		 * anymore.
+		 */
+		RB_CLEAR_NODE(&vma->vm_rb);
+		put_vma(vma);
+	}
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
 	if (vma->vm_file)
@@ -211,6 +238,13 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 	return next;
 }
 
+static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+{
+	if (IS_ENABLED(CONFIG_SPECULATIVE_PAGE_FAULT))
+		VM_BUG_ON_VMA(!RB_EMPTY_NODE(&vma->vm_rb), vma);
+	return __remove_vma(vma);
+}
+
 static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
 		struct list_head *uf);
 SYSCALL_DEFINE1(brk, unsigned long, brk)
@@ -475,7 +509,7 @@ static inline void vma_rb_insert(struct vm_area_struct *vma,
 
 	/* All rb_subtree_gap values must be consistent prior to insertion */
 	validate_mm_rb(root, NULL);
-
+	get_vma(vma);
 	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
 }

@@ -491,6 +525,14 @@ static void __vma_rb_erase(struct vm_area_struct *vma, struct mm_struct *mm)
 	mm_write_seqlock(mm);
 	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
 	mm_write_sequnlock(mm); /* wmb */
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	/*
+	 * Ensure the removal is complete before clearing the node.
+	 * Matched by vma_has_changed()/handle_speculative_fault().
+	 */
+	RB_CLEAR_NODE(&vma->vm_rb);
+	vm_rcu_put(vma);
+#endif
 }
 
 static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
@@ -2331,6 +2373,34 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 
 EXPORT_SYMBOL(find_vma);
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+/*
+ * Like find_vma() but under the protection of RCU and the mm sequence counter.
+ * The VMA returned has to be released by the caller through a call to
+ * put_vma().
+ */
+struct vm_area_struct *find_vma_rcu(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_area_struct *vma = NULL;
+	unsigned int seq;
+
+	do {
+		if (vma)
+			put_vma(vma);
+
+		seq = read_seqbegin(&mm->mm_seq);
+
+		rcu_read_lock();
+		vma = find_vma(mm, addr);
+		if (vma)
+			get_vma(vma);
+		rcu_read_unlock();
+	} while (read_seqretry(&mm->mm_seq, seq));
+
+	return vma;
+}
+#endif
+
 /*
  * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
  */
@@ -3231,7 +3301,7 @@ void exit_mmap(struct mm_struct *mm)
 	while (vma) {
 		if (vma->vm_flags & VM_ACCOUNT)
 			nr_accounted += vma_pages(vma);
-		vma = remove_vma(vma);
+		vma = __remove_vma(vma);
 	}
 	vm_unacct_memory(nr_accounted);
 }
--
2.21.0