[PATCH] mm: hugetlb: undo change to page mapcount in fault handler

From: Hillf Danton
Date: Thu Dec 22 2011 - 08:36:45 EST


Change the page mapcount only once the page is actually folded into a
page table entry, so the backout paths in hugetlb_no_page() never have
to undo a premature rmap update.
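
A condensed, illustrative sketch of the resulting order of operations in
hugetlb_no_page() (not a literal excerpt; identifiers are the ones used
in the diff below, and the locking comment reflects the assumption that
the page table lock is already held at this point):

	/* page table lock assumed held here */
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;	/* rmap not taken yet, nothing to unwind */

	/* only now is the mapcount touched, once the PTE install is certain */
	if (anon_rmap)
		hugepage_add_new_anon_rmap(page, vma, address);
	else
		page_dup_rmap(page);

	set_huge_pte_at(mm, address, ptep, new_pte);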

Cc: Michal Hocko <mhocko@xxxxxxx>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Hillf Danton <dhillf@xxxxxxxxx>
---

--- a/mm/hugetlb.c Tue Dec 20 21:26:30 2011
+++ b/mm/hugetlb.c Thu Dec 22 21:29:42 2011
@@ -2509,6 +2509,7 @@ static int hugetlb_no_page(struct mm_str
{
struct hstate *h = hstate_vma(vma);
int ret = VM_FAULT_SIGBUS;
+ int anon_rmap = 0;
pgoff_t idx;
unsigned long size;
struct page *page;
@@ -2563,14 +2564,13 @@ retry:
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(h);
spin_unlock(&inode->i_lock);
- page_dup_rmap(page);
} else {
lock_page(page);
if (unlikely(anon_vma_prepare(vma))) {
ret = VM_FAULT_OOM;
goto backout_unlocked;
}
- hugepage_add_new_anon_rmap(page, vma, address);
+ anon_rmap = 1;
}
} else {
/*
@@ -2583,7 +2583,6 @@ retry:
VM_FAULT_SET_HINDEX(h - hstates);
goto backout_unlocked;
}
- page_dup_rmap(page);
}

/*
@@ -2607,6 +2606,10 @@ retry:
if (!huge_pte_none(huge_ptep_get(ptep)))
goto backout;

+ if (anon_rmap)
+ hugepage_add_new_anon_rmap(page, vma, address);
+ else
+ page_dup_rmap(page);
new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
--