[PATCH 05/43] mm: Expand vma iterator interface.

From: Liam Howlett
Date: Tue Nov 29 2022 - 11:44:43 EST


From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx>

Add wrappers for the maple tree to the vma iterator. This will provide
type safety at compile time.

Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
---
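
Notes for reviewers (not part of the commit message): the sketch below
shows the calling pattern these wrappers are meant to enable. It is
illustration only, not code from this series; example_walk() is
invented, VMA_ITERATOR(), mmap_read_lock() and pr_info() already
exist, and for_each_vma_range() is touched by this patch.

/*
 * Illustration only: walk every VMA overlapping [start, end) with the
 * typed iterator instead of open-coded mas_*() calls.  The __end
 * argument is exclusive; vma_find() now converts it to the maple
 * tree's inclusive "last" internally.
 */
void example_walk(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, start);

	mmap_read_lock(mm);
	for_each_vma_range(vmi, vma, end)
		pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
	mmap_read_unlock(mm);
}

Related, for_each_vma() starts with vma_next(), which intentionally
calls mas_find() rather than mas_next() so that the first entry is
not skipped when the iterator has not yet walked the tree.
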
 include/linux/mm.h       | 46 ++++++++++++++++++++++---
 include/linux/mm_types.h |  4 +--
 mm/mmap.c                | 74 ++++++++++++++++++++++++++++++++++++++++
3 files changed, 117 insertions(+), 7 deletions(-)
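
On the end-address convention: the maple tree stores inclusive
{index, last} ranges while the MM works with exclusive end addresses,
so the off-by-one now lives in exactly two places, vma_find() (max - 1
going in) and vma_iter_end() (mas.last + 1 coming out), instead of at
every caller. A minimal sketch of the resulting invariant, again
illustration only (end_convention_demo() is invented):

/*
 * Illustration only: a VMA covering [0x1000, 0x3000) is stored with
 * index == 0x1000 and last == 0x2fff, so vma_iter_end() and vm_end
 * agree even though the tree range is inclusive.
 */
static void end_convention_demo(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	vma = vma_find(&vmi, 0x3000);	/* exclusive limit; mas_find() sees 0x2fff */
	if (vma)
		WARN_ON(vma_iter_end(&vmi) != vma->vm_end);
	mmap_read_unlock(mm);
}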

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8bbcccbc5565..2d3a49ba2261 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -664,16 +664,16 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
- return mas_find(&vmi->mas, max);
+ return mas_find(&vmi->mas, max - 1);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
/*
- * Uses vma_find() to get the first VMA when the iterator starts.
+ * Uses mas_find() to get the first VMA when the iterator starts.
* Calling mas_next() could skip the first entry.
*/
- return vma_find(vmi, ULONG_MAX);
+ return mas_find(&vmi->mas, ULONG_MAX);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
@@ -686,12 +686,50 @@ static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
return vmi->mas.index;
}

+static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
+{
+ return vmi->mas.last + 1;
+}

+static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
+ unsigned long count)
+{
+ return mas_expected_entries(&vmi->mas, count);
+}
+
+/* Free any unused preallocations */
+static inline void vma_iter_free(struct vma_iterator *vmi)
+{
+ mas_destroy(&vmi->mas);
+}
+
+static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
+ struct vm_area_struct *vma)
+{
+ vmi->mas.index = vma->vm_start;
+ vmi->mas.last = vma->vm_end - 1;
+ mas_store(&vmi->mas, vma);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void vma_iter_invalidate(struct vma_iterator *vmi)
+{
+ mas_pause(&vmi->mas);
+}
+
+static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
+{
+ mas_set(&vmi->mas, addr);
+}
+
#define for_each_vma(__vmi, __vma) \
while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end) \
- while (((__vma) = vma_find(&(__vmi), (__end) - 1)) != NULL)
+ while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)

#ifdef CONFIG_SHMEM
/*
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 500e536796ca..faff2cc005c9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -819,9 +819,7 @@ struct vma_iterator {
static inline void vma_iter_init(struct vma_iterator *vmi,
struct mm_struct *mm, unsigned long addr)
{
- vmi->mas.tree = &mm->mm_mt;
- vmi->mas.index = addr;
- vmi->mas.node = MAS_START;
+ mas_init(&vmi->mas, &mm->mm_mt, addr);
}

struct mmu_gather;
diff --git a/mm/mmap.c b/mm/mmap.c
index c3c5c1d6103d..8f24021ef5b5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -144,6 +144,80 @@ static void remove_vma(struct vm_area_struct *vma)
vm_area_free(vma);
}

+static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
+{
+ return mas_walk(&vmi->mas);
+}
+
+static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
+ unsigned long min)
+{
+ return mas_prev(&vmi->mas, min);
+}
+
+static inline int vma_iter_prealloc(struct vma_iterator *vmi,
+ struct vm_area_struct *vma)
+{
+ return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
+}
+
+/* Store a VMA with preallocated memory */
+static inline void vma_iter_store(struct vma_iterator *vmi,
+ struct vm_area_struct *vma)
+{
+ if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.index > vma->vm_start)) {
+ printk("%lu > %lu\n", vmi->mas.index, vma->vm_start);
+ printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
+ printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last);
+ mt_dump(vmi->mas.tree);
+ }
+ if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last < vma->vm_start)) {
+ printk("%lu < %lu\n", vmi->mas.last, vma->vm_start);
+ printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
+ printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last);
+ mt_dump(vmi->mas.tree);
+ }
+
+ if (vmi->mas.node != MAS_START &&
+ ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
+ vma_iter_invalidate(vmi);
+
+ vmi->mas.index = vma->vm_start;
+ vmi->mas.last = vma->vm_end - 1;
+ mas_store_prealloc(&vmi->mas, vma);
+}
+
+static inline void vma_iter_clear(struct vma_iterator *vmi,
+ unsigned long start, unsigned long end)
+{
+ mas_set_range(&vmi->mas, start, end - 1);
+ mas_store_prealloc(&vmi->mas, NULL);
+}
+
+static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
+ struct vm_area_struct *vma, gfp_t gfp)
+{
+ vmi->mas.index = vma->vm_start;
+ vmi->mas.last = vma->vm_end - 1;
+ mas_store_gfp(&vmi->mas, vma, gfp);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
+ unsigned long start, unsigned long end, gfp_t gfp)
+{
+ vmi->mas.index = start;
+ vmi->mas.last = end - 1;
+ mas_store_gfp(&vmi->mas, NULL, gfp);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
/*
* check_brk_limits() - Use platform specific check of range & verify mlock
* limits.
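
A closing note on the mm/mmap.c helpers: vma_iter_store() has no
failure path, so the intended pattern is to call vma_iter_prealloc()
while an error can still be returned, and vma_iter_free() only when a
successful preallocation is later abandoned (the store itself consumes
and frees the preallocated nodes via mas_store_prealloc()). A sketch
of that pairing, illustration only; demo_insert() and
some_final_check() are invented and would have to live next to the
helpers in mm/mmap.c:

/* Hypothetical stand-in for whatever late check a real caller makes. */
static bool some_final_check(struct vm_area_struct *vma)
{
	return vma->vm_start < vma->vm_end;
}

/*
 * Illustration only: preallocate while failure can still be reported,
 * then store without a failure path.  Caller holds mmap_lock for
 * writing and has already positioned @vmi over vma's range.
 */
static int demo_insert(struct vma_iterator *vmi, struct vm_area_struct *vma)
{
	if (vma_iter_prealloc(vmi, vma))
		return -ENOMEM;			/* nothing written yet */

	if (!some_final_check(vma)) {		/* hypothetical late failure */
		vma_iter_free(vmi);		/* drop unused preallocations */
		return -EINVAL;
	}

	vma_iter_store(vmi, vma);		/* cannot fail */
	return 0;
}
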
--
2.35.1