[PATCH] ptrace rewrite

From: Manfred Spraul (manfreds@colorfullife.com)
Date: Fri Apr 14 2000 - 03:29:47 EST


access_process_vm() contains a few races due to the new lazy mm
semantics. I've rewritten that function, but it's only tested on i386.

Unfortunately, this implementation is vastly slower than the old code,
but I couldn't find a more efficient implementation that doesn't crash.
[ ptrace on a thread that calls daemonize(), ...]

What do you think?

--
	Manfred

// $Header$
// Kernel Version:
//  VERSION = 2
//  PATCHLEVEL = 3
//  SUBLEVEL = 99
//  EXTRAVERSION = -pre5
--- 2.3/kernel/fork.c	Wed Apr 12 15:00:33 2000
+++ build-2.3/kernel/fork.c	Thu Apr 13 23:50:16 2000
@@ -329,6 +329,9 @@
 /*
  * Decrement the use count and release all resources for an mm.
+ *
+ * ptrace assumes that this function is only called with the
+ * kernel lock held.
  */
 void mmput(struct mm_struct *mm)
 {
--- 2.3/kernel/ptrace.c	Fri Mar 24 11:10:18 2000
+++ build-2.3/kernel/ptrace.c	Fri Apr 14 10:18:54 2000
@@ -14,111 +14,48 @@
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
+#include <asm/mmu_context.h>
 
-/*
- * Access another process' address space, one page at a time.
+/**
+ * access_process_vm - Access another process' address space
+ * @buf: Buffer for result, must be a kernel address
+ *
+ * Access another process' address space, the caller must have
+ * called task_lock() and lock_kernel().
+ *
  */
-static int access_one_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr, void *buf, int len, int write)
-{
-	pgd_t * pgdir;
-	pmd_t * pgmiddle;
-	pte_t * pgtable;
-	unsigned long mapnr;
-	unsigned long maddr;
-	struct page *page;
-
-repeat:
-	pgdir = pgd_offset(vma->vm_mm, addr);
-	if (pgd_none(*pgdir))
-		goto fault_in_page;
-	if (pgd_bad(*pgdir))
-		goto bad_pgd;
-	pgmiddle = pmd_offset(pgdir, addr);
-	if (pmd_none(*pgmiddle))
-		goto fault_in_page;
-	if (pmd_bad(*pgmiddle))
-		goto bad_pmd;
-	pgtable = pte_offset(pgmiddle, addr);
-	if (!pte_present(*pgtable))
-		goto fault_in_page;
-	mapnr = pte_pagenr(*pgtable);
-	if (write && (!pte_write(*pgtable) || !pte_dirty(*pgtable)))
-		goto fault_in_page;
-	page = mem_map + mapnr;
-	if ((mapnr >= max_mapnr) || PageReserved(page))
-		return 0;
-	flush_cache_page(vma, addr);
-
-	if (write) {
-		maddr = kmap(page);
-		memcpy((char *)maddr + (addr & ~PAGE_MASK), buf, len);
-		flush_page_to_ram(page);
-		flush_icache_page(vma, page);
-		kunmap(page);
-	} else {
-		maddr = kmap(page);
-		memcpy(buf, (char *)maddr + (addr & ~PAGE_MASK), len);
-		flush_page_to_ram(page);
-		kunmap(page);
-	}
-	return len;
-
-fault_in_page:
-	/* -1: out of memory. 0 - unmapped page */
-	if (handle_mm_fault(tsk, vma, addr, write) > 0)
-		goto repeat;
-	return 0;
-
-bad_pgd:
-	pgd_ERROR(*pgdir);
-	return 0;
-
-bad_pmd:
-	pmd_ERROR(*pgmiddle);
-	return 0;
-}
-
 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
 {
-	int copied;
-	struct vm_area_struct * vma;
+	int copied, retval;
+	struct mm_struct *mm, *old_mm, *old_activemm;
 
-	down(&tsk->mm->mmap_sem);
-	vma = find_extend_vma(tsk, addr);
-	if (!vma) {
-		up(&tsk->mm->mmap_sem);
+	mm=tsk->mm;
+	if(!mm)
 		return 0;
-	}
-	copied = 0;
-	for (;;) {
-		unsigned long offset = addr & ~PAGE_MASK;
-		int this_len = PAGE_SIZE - offset;
-		int retval;
-
-		if (this_len > len)
-			this_len = len;
-		retval = access_one_page(tsk, vma, addr, buf, this_len, write);
-		copied += retval;
-		if (retval != this_len)
-			break;
-
-		len -= retval;
-		if (!len)
-			break;
-
-		addr += retval;
-		buf += retval;
-
-		if (addr < vma->vm_end)
-			continue;
-		if (!vma->vm_next)
-			break;
-		if (vma->vm_next->vm_start != vma->vm_end)
-			break;
+	/* concurrent mmput()'s are prevented by the kernel lock */
+	atomic_inc(&mm->mm_users);
+
+	old_mm = current->mm;
+	old_activemm = current->active_mm;
+	current->mm=current->active_mm=mm;
+	activate_mm(old_activemm,mm);
+
+	if(write)
+		retval = copy_to_user((void*)addr, buf, len);
+	else
+		retval = copy_from_user(buf, (void*)addr, len);
 
-		vma = vma->vm_next;
-	}
-	up(&tsk->mm->mmap_sem);
+	if(retval)
+		copied = 0;
+	else
+		copied = len;
+
+	current->mm = old_mm;
+	current->active_mm = old_activemm;
+	activate_mm(mm, old_activemm);
+
+	up(&mm->mmap_sem);
+	mmput(mm);
 	return copied;
 }

- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.rutgers.edu Please read the FAQ at http://www.tux.org/lkml/



This archive was generated by hypermail 2b29 : Sat Apr 15 2000 - 21:00:24 EST