[patch-2.3.99-pre6-7] do_munmap_mm(mm, addr, len)

From: Tigran Aivazian (tigran@veritas.com)
Date: Wed Apr 26 2000 - 08:08:38 EST


Hi Linus,

This patch makes it possible to unmap another context's mmap'd areas,
not just current's. This kills two birds with one stone:

a) it makes some code cleaner, and possibly faster, in places where the
value of tsk->mm (tsk = current) is needed anyway, so there is no point
in wasting cycles inside do_munmap() dereferencing current->mm again.

b) being able to unmap another context's mmap'd areas is a good thing
in itself - e.g. it can be used by forced umount to forcibly unmap all
mmap'd files of a given process, as sketched below.
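
To illustrate b), here is a sketch of how a forced umount might use the
new interface. The helper is hypothetical (not part of this patch), and
it assumes the caller holds references on both tsk and file:

#include <linux/sched.h>
#include <linux/mm.h>

/* Hypothetical example, not in this patch: forcibly unmap every
 * mapping of "file" from tsk's address space.
 */
static void forced_unmap_file(struct task_struct *tsk, struct file *file)
{
        struct mm_struct *mm = tsk->mm;
        struct vm_area_struct *vma, *next;

        if (!mm)        /* kernel threads have no user address space */
                return;
        down(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = next) {
                /* save the link first: do_munmap_mm() may free vma */
                next = vma->vm_next;
                if (vma->vm_file == file)
                        do_munmap_mm(mm, vma->vm_start,
                                     vma->vm_end - vma->vm_start);
        }
        up(&mm->mmap_sem);
}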

I had some doubts about whether the calls to flush_tlb_range() should
be affected, but having had a brief look at the arch-specific
implementations thereof, they seem to do the right thing internally, so
we should be OK.
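
For reference, these implementations all take the mm as an explicit
argument rather than assuming current->mm; the UP i386 version has
roughly the following shape (quoted from memory - treat it as
approximate, not as the actual source):

/* On UP, a non-active mm cannot have entries in the TLB, because
 * reloading %cr3 at context switch flushes it, so a flush is only
 * needed when mm is the one currently loaded.
 */
static inline void flush_tlb_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        if (mm == current->active_mm)
                __flush_tlb();
}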

A copy is on:

  http://www.ocston.org/~tigran/patches/munmap-2.3.99-pre6-7.patch

Regards,
Tigran

diff -urN -X dontdiff linux/include/linux/mm.h munmap/include/linux/mm.h
--- linux/include/linux/mm.h Wed Apr 26 09:41:01 2000
+++ munmap/include/linux/mm.h Wed Apr 26 13:52:29 2000
@@ -445,7 +445,12 @@
         return ret;
 }
 
-extern int do_munmap(unsigned long, size_t);
+extern int do_munmap_mm(struct mm_struct *, unsigned long, size_t);
+extern inline int do_munmap(unsigned long addr, size_t len)
+{
+        return do_munmap_mm(current->mm, addr, len);
+}
+
 extern unsigned long do_brk(unsigned long, unsigned long);
 
 struct zone_t;
diff -urN -X dontdiff linux/kernel/ksyms.c munmap/kernel/ksyms.c
--- linux/kernel/ksyms.c Wed Apr 26 09:41:01 2000
+++ munmap/kernel/ksyms.c Wed Apr 26 13:45:11 2000
@@ -91,7 +91,7 @@
 
 /* process memory management */
 EXPORT_SYMBOL(do_mmap_pgoff);
-EXPORT_SYMBOL(do_munmap);
+EXPORT_SYMBOL(do_munmap_mm);
 EXPORT_SYMBOL(do_brk);
 EXPORT_SYMBOL(exit_mm);
 EXPORT_SYMBOL(exit_files);
diff -urN -X dontdiff linux/mm/mmap.c munmap/mm/mmap.c
--- linux/mm/mmap.c Wed Apr 26 09:41:01 2000
+++ munmap/mm/mmap.c Wed Apr 26 13:51:20 2000
@@ -110,7 +110,7 @@
 
         /* Always allow shrinking brk. */
         if (brk <= mm->brk) {
-                if (!do_munmap(newbrk, oldbrk-newbrk))
+                if (!do_munmap_mm(mm, newbrk, oldbrk-newbrk))
                         goto set_brk;
                 goto out;
         }
@@ -281,7 +281,7 @@
 
         /* Clear old maps */
         error = -ENOMEM;
-        if (do_munmap(addr, len))
+        if (do_munmap_mm(mm, addr, len))
                 goto free_vma;
 
         /* Check against address space limit. */
@@ -319,7 +319,7 @@
                 if (error)
                         goto unmap_and_free_vma;
         } else if (flags & MAP_SHARED) {
-                error = map_zero_setup (vma);
+                error = map_zero_setup(vma);
         }
 
         /*
@@ -517,8 +517,9 @@
  * allocate a new one, and the return indicates whether the old
  * area was reused.
  */
-static struct vm_area_struct * unmap_fixup(struct vm_area_struct *area,
-        unsigned long addr, size_t len, struct vm_area_struct *extra)
+static struct vm_area_struct * unmap_fixup(struct mm_struct *mm,
+        struct vm_area_struct *area, unsigned long addr, size_t len,
+        struct vm_area_struct *extra)
 {
         struct vm_area_struct *mpnt;
         unsigned long end = addr + len;
@@ -540,11 +541,11 @@
         /* Work out to one of the ends. */
         if (end == area->vm_end) {
                 area->vm_end = addr;
-                vmlist_modify_lock(current->mm);
+                vmlist_modify_lock(mm);
         } else if (addr == area->vm_start) {
                 area->vm_pgoff += (end - area->vm_start) >> PAGE_SHIFT;
                 area->vm_start = end;
-                vmlist_modify_lock(current->mm);
+                vmlist_modify_lock(mm);
         } else {
         /* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
                 /* Add end mapping -- leave beginning for below */
@@ -566,12 +567,12 @@
                 if (mpnt->vm_ops && mpnt->vm_ops->open)
                         mpnt->vm_ops->open(mpnt);
                 area->vm_end = addr; /* Truncate area */
-                vmlist_modify_lock(current->mm);
-                insert_vm_struct(current->mm, mpnt);
+                vmlist_modify_lock(mm);
+                insert_vm_struct(mm, mpnt);
         }
 
-        insert_vm_struct(current->mm, area);
-        vmlist_modify_unlock(current->mm);
+        insert_vm_struct(mm, area);
+        vmlist_modify_unlock(mm);
         return extra;
 }
 
@@ -638,9 +639,8 @@
  * work. This now handles partial unmappings.
  * Jeremy Fitzhardinge <jeremy@sw.oz.au>
  */
-int do_munmap(unsigned long addr, size_t len)
+int do_munmap_mm(struct mm_struct *mm, unsigned long addr, size_t len)
 {
-        struct mm_struct * mm;
         struct vm_area_struct *mpnt, *prev, **npp, *free, *extra;
 
         if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
@@ -654,7 +654,6 @@
          * every area affected in some way (by any overlap) is put
          * on the list. If nothing is put on, nothing is affected.
          */
-        mm = current->mm;
         mpnt = find_vma_prev(mm, addr, &prev);
         if (!mpnt)
                 return 0;
@@ -717,7 +716,7 @@
                 /*
                  * Fix the mapping, and free the old area if it wasn't reused.
                  */
-                extra = unmap_fixup(mpnt, st, size, extra);
+                extra = unmap_fixup(mm, mpnt, st, size, extra);
         }
 
         /* Release the extra vma struct if it wasn't used */
@@ -732,10 +731,11 @@
 asmlinkage long sys_munmap(unsigned long addr, size_t len)
 {
         int ret;
+        struct mm_struct *mm = current->mm;
 
-        down(&current->mm->mmap_sem);
-        ret = do_munmap(addr, len);
-        up(&current->mm->mmap_sem);
+        down(&mm->mmap_sem);
+        ret = do_munmap_mm(mm, addr, len);
+        up(&mm->mmap_sem);
         return ret;
 }
 
@@ -767,7 +767,7 @@
         /*
          * Clear old maps. this also does some error checking for us
          */
-        retval = do_munmap(addr, len);
+        retval = do_munmap_mm(mm, addr, len);
         if (retval != 0)
                 return retval;
 
