[RFC PATCH v1 5/8] mseal munmap

From: jeffxu
Date: Mon Oct 16 2023 - 10:39:22 EST


From: Jeff Xu <jeffxu@xxxxxxxxxx>

Check the seal before unmapping for munmap(2). do_vmi_munmap() gains a
caller-origin argument so that can_modify_mm() only rejects unmaps that
originate from the munmap(2) syscall (ON_BEHALF_OF_USERSPACE); in-kernel
callers such as mmap_region(), vm_brk_flags(), vm_munmap() and the mremap
paths pass ON_BEHALF_OF_KERNEL and are unaffected. Unmapping a sealed
mapping from userspace fails with EACCES.
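
Illustration of the intended userspace-visible behavior (a sketch, not part
of the patch): once a mapping is sealed against unmapping, munmap(2) fails
with EACCES, while kernel-internal unmaps keep working because every
in-kernel call site passes ON_BEHALF_OF_KERNEL. The __NR_mseal number, the
mseal() argument layout and the MM_SEAL_MUNMAP bit below are assumptions
standing in for definitions introduced earlier in this series; substitute
the values from the patched kernel's uapi headers.

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Placeholder values, not from this patch; take them from the patched headers. */
#ifndef __NR_mseal
#define __NR_mseal	462
#endif
#ifndef MM_SEAL_MUNMAP
#define MM_SEAL_MUNMAP	0x4
#endif

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Seal the mapping against unmapping; argument layout assumed from this series. */
	if (syscall(__NR_mseal, p, (size_t)page, (unsigned long)MM_SEAL_MUNMAP, 0UL) < 0) {
		perror("mseal");
		return 1;
	}

	/*
	 * munmap(2) enters __vm_munmap() as ON_BEHALF_OF_USERSPACE, so
	 * can_modify_mm() rejects it and the call fails with EACCES.
	 */
	if (munmap(p, page) == -1 && errno == EACCES)
		printf("munmap on sealed mapping rejected: %s\n", strerror(errno));

	return 0;
}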

Signed-off-by: Jeff Xu <jeffxu@xxxxxxxxxx>
---
 include/linux/mm.h |  2 +-
 mm/mmap.c          | 22 ++++++++++++++--------
 mm/mremap.c        |  5 +++--
 3 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index aafdb68950f8..95b793eb3a80 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3294,7 +3294,7 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long pgoff, unsigned long *populate, struct list_head *uf);
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
- bool unlock);
+ bool unlock, enum caller_origin called);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
diff --git a/mm/mmap.c b/mm/mmap.c
index 9b6c477e713e..f4bfcc5d2c10 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2601,6 +2601,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
* @len: The length of the range to munmap
* @uf: The userfaultfd list_head
* @unlock: set to true if the user wants to drop the mmap_lock on success
+ * @called: caller origin
*
* This function takes a @mas that is either pointing to the previous VMA or set
* to MA_START and sets it up to remove the mapping(s). The @len will be
@@ -2611,7 +2612,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
*/
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
- bool unlock)
+ bool unlock, enum caller_origin called)
{
unsigned long end;
struct vm_area_struct *vma;
@@ -2623,6 +2624,9 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
if (end == start)
return -EINVAL;

+ if (!can_modify_mm(mm, start, end, MM_ACTION_MUNMAP, called))
+ return -EACCES;
+
/* arch_unmap() might do unmaps itself. */
arch_unmap(mm, start, end);

@@ -2650,7 +2654,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
{
VMA_ITERATOR(vmi, mm, start);

- return do_vmi_munmap(&vmi, mm, start, len, uf, false);
+ return do_vmi_munmap(&vmi, mm, start, len, uf, false,
+ ON_BEHALF_OF_KERNEL);
}

unsigned long mmap_region(struct file *file, unsigned long addr,
@@ -2684,7 +2689,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
}

/* Unmap any existing mapping in the area */
- if (do_vmi_munmap(&vmi, mm, addr, len, uf, false))
+ if (do_vmi_munmap(&vmi, mm, addr, len, uf, false, ON_BEHALF_OF_KERNEL))
return -ENOMEM;

/*
@@ -2909,7 +2914,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
return error;
}

-static int __vm_munmap(unsigned long start, size_t len, bool unlock)
+static int __vm_munmap(unsigned long start, size_t len, bool unlock,
+ enum caller_origin called)
{
int ret;
struct mm_struct *mm = current->mm;
@@ -2919,7 +2925,7 @@ static int __vm_munmap(unsigned long start, size_t len, bool unlock)
if (mmap_write_lock_killable(mm))
return -EINTR;

- ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
+ ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock, called);
if (ret || !unlock)
mmap_write_unlock(mm);

@@ -2929,14 +2935,14 @@ static int __vm_munmap(unsigned long start, size_t len, bool unlock)

int vm_munmap(unsigned long start, size_t len)
{
- return __vm_munmap(start, len, false);
+ return __vm_munmap(start, len, false, ON_BEHALF_OF_KERNEL);
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
addr = untagged_addr(addr);
- return __vm_munmap(addr, len, true);
+ return __vm_munmap(addr, len, true, ON_BEHALF_OF_USERSPACE);
}


@@ -3168,7 +3174,7 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
if (ret)
goto limits_failed;

- ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
+ ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0, ON_BEHALF_OF_KERNEL);
if (ret)
goto munmap_failed;

diff --git a/mm/mremap.c b/mm/mremap.c
index 056478c106ee..e43f9ceaa29d 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -715,7 +715,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
}

vma_iter_init(&vmi, mm, old_addr);
- if (!do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false)) {
+ if (!do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false,
+ ON_BEHALF_OF_KERNEL)) {
/* OOM: unable to split vma, just get accounts right */
if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
vm_acct_memory(old_len >> PAGE_SHIFT);
@@ -1009,7 +1010,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
}

ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len,
- &uf_unmap, true);
+ &uf_unmap, true, ON_BEHALF_OF_KERNEL);
if (ret)
goto out;

--
2.42.0.609.gbb76f46606-goog