[PATCH 6/6] pagemap: introduce data structure for pagemap entry

From: Naoya Horiguchi
Date: Thu Jan 12 2012 - 14:30:41 EST


Currently the local variable holding the pagemap entry in pagemap_pte_range()
is named pfn and typed as u64, which is misleading (a true pfn should be
unsigned long.)
This patch introduces a dedicated type for pagemap entries and replaces
the open-coded u64 usage with it.

Signed-off-by: Naoya Horiguchi <n-horiguchi@xxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
fs/proc/task_mmu.c | 66 +++++++++++++++++++++++++++------------------------
1 files changed, 35 insertions(+), 31 deletions(-)

diff --git 3.2-rc5.orig/fs/proc/task_mmu.c 3.2-rc5/fs/proc/task_mmu.c
index 5bf4ccf..1fa6c81 100644
--- 3.2-rc5.orig/fs/proc/task_mmu.c
+++ 3.2-rc5/fs/proc/task_mmu.c
@@ -587,9 +587,13 @@ const struct file_operations proc_clear_refs_operations = {
.llseek = noop_llseek,
};

+typedef struct {
+ u64 pme;
+} pme_t;
+
struct pagemapread {
int pos, len;
- u64 *buffer;
+ pme_t *buffer;
};

#define PAGEMAP_WALK_SIZE (PMD_SIZE)
@@ -612,10 +616,15 @@ struct pagemapread {
#define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER 1

-static int add_to_pagemap(unsigned long addr, u64 pfn,
+static inline pme_t make_pme(u64 val)
+{
+ return (pme_t) { .pme = val };
+}
+
+static int add_to_pagemap(unsigned long addr, pme_t *pme,
struct pagemapread *pm)
{
- pm->buffer[pm->pos++] = pfn;
+ pm->buffer[pm->pos++] = *pme;
if (pm->pos >= pm->len)
return PM_END_OF_BUFFER;
return 0;
@@ -627,8 +636,10 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
struct pagemapread *pm = walk->private;
unsigned long addr;
int err = 0;
+ pme_t pme = make_pme(PM_NOT_PRESENT);
+
for (addr = start; addr < end; addr += PAGE_SIZE) {
- err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
+ err = add_to_pagemap(addr, &pme, pm);
if (err)
break;
}
@@ -641,36 +652,31 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte)
return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

-static u64 pte_to_pagemap_entry(pte_t pte)
+static void pte_to_pagemap_entry(pme_t *pme, pte_t pte)
{
- u64 pme = 0;
if (is_swap_pte(pte))
- pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
- | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
+ *pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte))
+ | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP);
else if (pte_present(pte))
- pme = PM_PFRAME(pte_pfn(pte))
- | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
- return pme;
+ *pme = make_pme(PM_PFRAME(pte_pfn(pte))
+ | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static u64 thp_pte_to_pagemap_entry(pte_t pte, int offset)
+static void thp_pte_to_pagemap_entry(pme_t *pme, pte_t pte, int offset)
{
- u64 pme = 0;
/*
* Currently pte for thp is always present because thp can not be
* swapped-out, migrated, or HWPOISONed (split in such cases instead.)
* This if-check is just to prepare for future implementation.
*/
if (pte_present(pte))
- pme = PM_PFRAME(pte_pfn(pte) + offset)
- | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
- return pme;
+ *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
+ | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
}
#else
-static inline u64 thp_pte_to_pagemap_entry(pte_t pte, int offset)
+static inline void thp_pte_to_pagemap_entry(pme_t *pme, pte_t pte, int offset)
{
- return 0;
}
#endif

@@ -681,7 +687,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct pagemapread *pm = walk->private;
pte_t *pte;
int err = 0;
- u64 pfn = PM_NOT_PRESENT;
+ pme_t pme = make_pme(PM_NOT_PRESENT);

/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -691,8 +697,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
for (; addr != end; addr += PAGE_SIZE) {
unsigned long offset = (addr & ~PAGEMAP_WALK_MASK)
>> PAGE_SHIFT;
- pfn = thp_pte_to_pagemap_entry(huge_pte, offset);
- err = add_to_pagemap(addr, pfn, pm);
+ thp_pte_to_pagemap_entry(&pme, huge_pte, offset);
+ err = add_to_pagemap(addr, &pme, pm);
if (err)
break;
}
@@ -711,11 +717,11 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (vma && (vma->vm_start <= addr) &&
!is_vm_hugetlb_page(vma)) {
pte = pte_offset_map(pmd, addr);
- pfn = pte_to_pagemap_entry(*pte);
+ pte_to_pagemap_entry(&pme, *pte);
/* unmap before userspace copy */
pte_unmap(pte);
}
- err = add_to_pagemap(addr, pfn, pm);
+ err = add_to_pagemap(addr, &pme, pm);
if (err)
return err;
}
@@ -726,13 +732,11 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
}

#ifdef CONFIG_HUGETLB_PAGE
-static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
+static void huge_pte_to_pagemap_entry(pme_t *pme, pte_t pte, int offset)
{
- u64 pme = 0;
if (pte_present(pte))
- pme = PM_PFRAME(pte_pfn(pte) + offset)
- | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
- return pme;
+ *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
+ | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
}

/* This function walks within one hugetlb entry in the single call */
@@ -742,12 +746,12 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
{
struct pagemapread *pm = walk->private;
int err = 0;
- u64 pfn;
+ pme_t pme = make_pme(PM_NOT_PRESENT);

for (; addr != end; addr += PAGE_SIZE) {
int offset = (addr & ~hmask) >> PAGE_SHIFT;
- pfn = huge_pte_to_pagemap_entry(*pte, offset);
- err = add_to_pagemap(addr, pfn, pm);
+ huge_pte_to_pagemap_entry(&pme, *pte, offset);
+ err = add_to_pagemap(addr, &pme, pm);
if (err)
return err;
}
--
1.7.6.5

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/