[PATCH v2 14/20] mm: Make hibernate handle unmapped pages

From: Rick Edgecombe
Date: Mon Jan 28 2019 - 19:40:07 EST


For architectures with CONFIG_ARCH_HAS_SET_ALIAS, pages can be unmapped
briefly on the direct map, even when CONFIG_DEBUG_PAGEALLOC is not
configured. So define kernel_map_pages() and kernel_page_present() when
CONFIG_ARCH_HAS_SET_ALIAS is defined as well, and update the callers (in
page_alloc.c) that assume those functions are only implemented when
CONFIG_DEBUG_PAGEALLOC is defined.
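As a sketch of why this matters (a hypothetical caller for illustration
only; the unmap-side name is assumed from the earlier set_alias patches
in this series), a page can transiently be not present in the direct map
even without DEBUG_PAGEALLOC:

	/*
	 * Illustrative only, not part of this patch: while the alias is
	 * non-valid, the page has no present direct map PTE, so a
	 * concurrent hibernate image save would find it not present.
	 */
	set_alias_nv_noflush(page);		/* direct map entry cleared */
	/* ... access the page only through another mapping here ... */
	set_alias_default_noflush(page);	/* direct map entry restored */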

Now, when CONFIG_ARCH_HAS_SET_ALIAS=y, hibernate will handle not-present
pages when saving, as it previously did only when CONFIG_DEBUG_PAGEALLOC
was configured. This does not appear to have a significant impact on
hibernation performance.

Before:
[ 4.670938] PM: Wrote 171996 kbytes in 0.21 seconds (819.02 MB/s)

After:
[ 4.504714] PM: Wrote 178932 kbytes in 0.22 seconds (813.32 MB/s)
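
For reference, the saving path in kernel/power/snapshot.c already copes
with not-present pages; abridged from mainline (not part of this patch),
it looks approximately like:

	static void safe_copy_page(void *dst, struct page *s_page)
	{
		if (kernel_page_present(s_page)) {
			do_copy_page(dst, page_address(s_page));
		} else {
			/* Temporarily map the page, copy it, unmap again */
			kernel_map_pages(s_page, 1, 1);
			do_copy_page(dst, page_address(s_page));
			kernel_map_pages(s_page, 1, 0);
		}
	}

With kernel_map_pages() and kernel_page_present() now defined for
CONFIG_ARCH_HAS_SET_ALIAS as well, this path works without
CONFIG_DEBUG_PAGEALLOC.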

Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: "Rafael J. Wysocki" <rjw@xxxxxxxxxxxxx>
Cc: Pavel Machek <pavel@xxxxxx>
Acked-by: Pavel Machek <pavel@xxxxxx>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@xxxxxxxxx>
---
arch/x86/mm/pageattr.c | 4 ----
include/linux/mm.h | 18 ++++++------------
mm/page_alloc.c | 7 +++++--
3 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3a51915a1410..717bdc188aab 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -2257,7 +2257,6 @@ int set_alias_default_noflush(struct page *page)
return __set_pages_p(page, 1);
}

-#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
@@ -2302,11 +2301,8 @@ bool kernel_page_present(struct page *page)
pte = lookup_address((unsigned long)page_address(page), &level);
return (pte_val(*pte) & _PAGE_PRESENT);
}
-
#endif /* CONFIG_HIBERNATION */

-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
unsigned numpages, unsigned long page_flags)
{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80bb6408fe73..b362a280a919 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2642,37 +2642,31 @@ static inline void kernel_poison_pages(struct page *page, int numpages,
int enable) { }
#endif

-#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
-extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline bool debug_pagealloc_enabled(void)
{
- return _debug_pagealloc_enabled;
+ return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && _debug_pagealloc_enabled;
}

+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_ALIAS)
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
- if (!debug_pagealloc_enabled())
- return;
-
__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
-#else /* CONFIG_DEBUG_PAGEALLOC */
+#else /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_ALIAS */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
-static inline bool debug_pagealloc_enabled(void)
-{
- return false;
-}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_ALIAS */

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d295c9bc01a8..92d0a0934274 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1074,7 +1074,9 @@ static __always_inline bool free_pages_prepare(struct page *page,
}
arch_free_page(page, order);
kernel_poison_pages(page, 1 << order, 0);
- kernel_map_pages(page, 1 << order, 0);
+ if (debug_pagealloc_enabled())
+ kernel_map_pages(page, 1 << order, 0);
+
kasan_free_nondeferred_pages(page, order);

return true;
@@ -1944,7 +1946,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
set_page_refcounted(page);

arch_alloc_page(page, order);
- kernel_map_pages(page, 1 << order, 1);
+ if (debug_pagealloc_enabled())
+ kernel_map_pages(page, 1 << order, 1);
kernel_poison_pages(page, 1 << order, 1);
kasan_alloc_pages(page, order);
set_page_owner(page, order, gfp_flags);
--
2.17.1