[PATCH 4.18 05/22] x86/mm/init: Add helper for freeing kernel image pages

From: Greg Kroah-Hartman
Date: Thu Aug 23 2018 - 05:11:38 EST


4.18-stable review patch. If anyone has any objections, please let me know.

------------------

From: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>

commit 6ea2738e0ca0e626c75202fb051c1e88d7a950fa upstream.

When chunks of the kernel image are freed, free_init_pages() is used
directly. Consolidate the three sites that do this into a new helper,
free_kernel_image_pages(). Also update the string passed to
free_init_pages() from "unused kernel" to "unused kernel image", which
describes that memory incrementally better than before.
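
For quick reference, the consolidation amounts to the small wrapper
below. This is only a restatement of the hunks in this patch, not
anything beyond them; free_init_pages() is the existing x86 routine
that does the actual freeing:

	/*
	 * begin/end can be in the direct map or the "high kernel mapping"
	 * used for the kernel image only. free_init_pages() will do the
	 * right thing for either kind of address.
	 */
	void free_kernel_image_pages(void *begin, void *end)
	{
		free_init_pages("unused kernel image",
				(unsigned long)begin, (unsigned long)end);
	}

A caller such as free_initmem() then reduces to:

	free_kernel_image_pages(&__init_begin, &__init_end);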

Signed-off-by: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: keescook@xxxxxxxxxx
Cc: aarcange@xxxxxxxxxx
Cc: jgross@xxxxxxxx
Cc: jpoimboe@xxxxxxxxxx
Cc: gregkh@xxxxxxxxxxxxxxxxxxx
Cc: peterz@xxxxxxxxxxxxx
Cc: hughd@xxxxxxxxxx
Cc: torvalds@xxxxxxxxxxxxxxxxxxxx
Cc: bp@xxxxxxxxx
Cc: luto@xxxxxxxxxx
Cc: ak@xxxxxxxxxxxxxxx
Cc: Kees Cook <keescook@xxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
Cc: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Cc: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Link: https://lkml.kernel.org/r/20180802225829.FE0E32EA@xxxxxxxxxxxxxxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
arch/x86/include/asm/processor.h | 1 +
arch/x86/mm/init.c | 15 ++++++++++++---
arch/x86/mm/init_64.c | 4 ++--
3 files changed, 15 insertions(+), 5 deletions(-)

--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -971,6 +971,7 @@ static inline uint32_t hypervisor_cpuid_

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+extern void free_kernel_image_pages(void *begin, void *end);

void default_idle(void);
#ifdef CONFIG_XEN
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -775,13 +775,22 @@ void free_init_pages(char *what, unsigne
}
}

+/*
+ * begin/end can be in the direct map or the "high kernel mapping"
+ * used for the kernel image only. free_init_pages() will do the
+ * right thing for either kind of address.
+ */
+void free_kernel_image_pages(void *begin, void *end)
+{
+ free_init_pages("unused kernel image",
+ (unsigned long)begin, (unsigned long)end);
+}
+
void __ref free_initmem(void)
{
e820__reallocate_tables();

- free_init_pages("unused kernel",
- (unsigned long)(&__init_begin),
- (unsigned long)(&__init_end));
+ free_kernel_image_pages(&__init_begin, &__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1283,8 +1283,8 @@ void mark_rodata_ro(void)
set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

- free_init_pages("unused kernel", text_end, rodata_start);
- free_init_pages("unused kernel", rodata_end, _sdata);
+ free_kernel_image_pages((void *)text_end, (void *)rodata_start);
+ free_kernel_image_pages((void *)rodata_end, (void *)_sdata);

debug_checkwx();