[PATCH] x86: Add NX protection for kernel data on 64-bit

From: Matthieu CASTET
Date: Mon Jan 24 2011 - 17:12:45 EST


This fixes CPU hotplug support by allocating dedicated page tables
for the identity mapping used by the trampoline.
This is needed because the kernel now sets the NX flag in level3_ident_pgt
and level3_kernel_pgt, which makes them unusable from the trampoline.
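
For background: the NX bit is bit 63 of a 64-bit page table entry, and once
EFER.NXE is enabled an instruction fetch through an entry with that bit set
faults. The trampoline fetches its code through the identity mapping, so it
trips over exactly this. A minimal sketch of the rule (not kernel code;
fetch_would_fault is a made-up name):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define _PAGE_NX	(1ULL << 63)	/* bit 63 of a PTE/PMD/PUD entry */

/* Hypothetical helper: would an instruction fetch through this entry
 * fault?  Only relevant once EFER.NXE has been enabled. */
static bool fetch_would_fault(uint64_t entry, bool efer_nxe)
{
	return efer_nxe && (entry & _PAGE_NX);
}

int main(void)
{
	/* A present but NX-marked mapping: the AP's first fetch faults. */
	printf("fetch faults: %d\n", fetch_would_fault(_PAGE_NX | 0x1, true));
	return 0;
}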

We also set the Low Kernel Mapping to NX.
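
The range covered there (see the init_64.c hunk below) is one PUD's worth of
the direct mapping. A quick sketch of the arithmetic, assuming the usual
x86-64 constants:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_PAGE_SIZE	(1UL << 21)	/* one 2 MiB large page */
#define PTRS_PER_PMD	512

int main(void)
{
	/* (PMD_PAGE_SIZE * PTRS_PER_PMD) >> PAGE_SHIFT, as in the patch:
	 * 2 MiB * 512 = 1 GiB, i.e. 262144 4 KiB pages marked NX. */
	unsigned long pages = (PMD_PAGE_SIZE * PTRS_PER_PMD) >> PAGE_SHIFT;
	printf("%lu pages (%lu MiB)\n", pages, (pages << PAGE_SHIFT) >> 20);
	return 0;
}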

Finally, we apply NX in free_init_pages() only once the kernel has actually
been switched to NX mode, in order to preserve the large page mappings.
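
The point of the guard: set_memory_nx()/set_memory_rw() on a range that does
not cover whole 2 MiB units forces the covering large page to be split into
4 KiB PTEs, so calling them before the kernel is actually read-only would
shatter mappings for nothing. A toy model of the alignment condition
(made-up helper, not the kernel's CPA code):

#include <stdio.h>
#include <stdbool.h>

#define PMD_SIZE	(1UL << 21)	/* 2 MiB */
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Toy model: an attribute change on [start, end) splits a 2 MiB
 * mapping unless the range covers whole 2 MiB units. */
static bool would_split_large_page(unsigned long start, unsigned long end)
{
	return (start & ~PMD_MASK) || (end & ~PMD_MASK);
}

int main(void)
{
	/* A range that ends mid-large-page, like the 696K ro region in
	 * the dump below, forces a split. */
	printf("splits: %d\n", would_split_large_page(0x1200000, 0x12ae000));
	return 0;
}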

The mapping now looks like this:
---[ Low Kernel Mapping ]---
0xffff880000000000-0xffff880000200000 2M RW GLB NX pte
0xffff880000200000-0xffff880001000000 14M RW PSE GLB NX pmd
0xffff880001000000-0xffff880001200000 2M ro PSE GLB NX pmd
0xffff880001200000-0xffff8800012ae000 696K ro GLB NX pte
0xffff8800012ae000-0xffff880001400000 1352K RW GLB NX pte
0xffff880001400000-0xffff880001503000 1036K ro GLB NX pte
0xffff880001503000-0xffff880001600000 1012K RW GLB NX pte
0xffff880001600000-0xffff880007e00000 104M RW PSE GLB NX pmd
0xffff880007e00000-0xffff880007ffd000 2036K RW GLB NX pte
0xffff880007ffd000-0xffff880008000000 12K pte
0xffff880008000000-0xffff880040000000 896M pmd
0xffff880040000000-0xffff888000000000 511G pud
0xffff888000000000-0xffffc90000000000 66048G pgd
---[ vmalloc() Area ]---
[...]
---[ High Kernel Mapping ]---
0xffffffff80000000-0xffffffff81000000 16M pmd
0xffffffff81000000-0xffffffff81400000 4M ro PSE GLB x pmd
0xffffffff81400000-0xffffffff81600000 2M ro PSE GLB NX pmd
0xffffffff81600000-0xffffffff81800000 2M RW PSE GLB NX pmd
0xffffffff81800000-0xffffffffa0000000 488M pmd
---[ Modules ]---
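
Each line above shows the effective protections at the mapping's final
level: RW/ro, PSE for a large page, GLB for global, and NX or x for the
execute bit. A small sketch decoding those flags from a raw entry, using
the architectural bit positions:

#include <stdio.h>
#include <stdint.h>

#define _PAGE_RW	(1ULL << 1)
#define _PAGE_PSE	(1ULL << 7)
#define _PAGE_GLOBAL	(1ULL << 8)
#define _PAGE_NX	(1ULL << 63)

static void print_flags(uint64_t e)
{
	printf("%s %s%s%s\n",
	       (e & _PAGE_RW) ? "RW" : "ro",
	       (e & _PAGE_PSE) ? "PSE " : "",
	       (e & _PAGE_GLOBAL) ? "GLB " : "",
	       (e & _PAGE_NX) ? "NX" : "x");
}

int main(void)
{
	/* A global, writable, non-executable 2 MiB mapping, as in the
	 * "RW PSE GLB NX pmd" lines above. */
	print_flags(_PAGE_RW | _PAGE_PSE | _PAGE_GLOBAL | _PAGE_NX);
	return 0;
}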

Signed-off-by: Matthieu CASTET <castet.matthieu@xxxxxxx>
---
arch/x86/kernel/head_64.S | 18 ++++++++++++++++++
arch/x86/kernel/trampoline_64.S | 4 ++--
arch/x86/mm/init.c | 6 ++++--
arch/x86/mm/init_64.c | 6 +++++-
4 files changed, 29 insertions(+), 5 deletions(-)
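
In the new trampoline tables below, PGD slots 0 and 511 share one private
level-3 table, whose slots 0 and L3_START_KERNEL in turn share one level-2
table of executable 2 MiB pages. A userspace sketch of that topology (the
flag values and L3_START_KERNEL = 510 are assumptions based on the standard
4-level layout):

#include <stdint.h>

#define ENTRIES		512
#define FLAG_PRESENT	0x001
#define FLAG_RW		0x002
#define FLAG_PSE	0x080	/* 2 MiB page at the PMD level */
#define L3_START_KERNEL	510	/* pud_index(__START_KERNEL_map), assumed */

static uint64_t l4[ENTRIES], l3[ENTRIES], l2[ENTRIES];

static void build_trampoline_tables(void)
{
	int i;

	/* A PMD's worth of executable 2 MiB pages covering the first
	 * 1 GiB; NX deliberately clear, since the trampoline fetches
	 * code through this mapping. */
	for (i = 0; i < ENTRIES; i++)
		l2[i] = ((uint64_t)i << 21) | FLAG_PRESENT | FLAG_RW | FLAG_PSE;

	/* Level-3 slots 0 and L3_START_KERNEL share the same level 2. */
	l3[0] = (uint64_t)(uintptr_t)l2 | FLAG_PRESENT | FLAG_RW;
	l3[L3_START_KERNEL] = l3[0];

	/* Top-level slots 0 and 511 share the same level 3. */
	l4[0] = (uint64_t)(uintptr_t)l3 | FLAG_PRESENT | FLAG_RW;
	l4[511] = l4[0];
}

int main(void)
{
	build_trampoline_tables();
	return 0;
}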

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 239046b..47f56dc 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -139,6 +139,9 @@ ident_complete:
#ifdef CONFIG_X86_TRAMPOLINE
addq %rbp, trampoline_level4_pgt + 0(%rip)
addq %rbp, trampoline_level4_pgt + (511*8)(%rip)
+
+ addq %rbp, trampoline_level3_ident_pgt + 0(%rip)
+ addq %rbp, trampoline_level3_ident_pgt + (L3_START_KERNEL*8)(%rip)
#endif

/* Due to ENTRY(), sometimes the empty space gets filled with
@@ -396,6 +399,21 @@ NEXT_PAGE(level2_kernel_pgt)
NEXT_PAGE(level2_spare_pgt)
.fill 512, 8, 0

+#ifdef CONFIG_X86_TRAMPOLINE
+NEXT_PAGE(trampoline_level3_ident_pgt)
+ .quad trampoline_level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .fill L3_START_KERNEL-1,8,0
+ .quad trampoline_level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .fill 511-L3_START_KERNEL,8,0
+
+
+NEXT_PAGE(trampoline_level2_ident_pgt)
+ /* Since I easily can, map the first 1G.
+ * Don't set NX because code runs from these pages.
+ */
+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
+#endif
+
#undef PMDS
#undef NEXT_PAGE

diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 075d130..a408935 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -160,8 +160,8 @@ trampoline_stack:
.org 0x1000
trampoline_stack_end:
ENTRY(trampoline_level4_pgt)
- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .quad trampoline_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
.fill 510,8,0
- .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .quad trampoline_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE

ENTRY(trampoline_end)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 947f42a..58d173b 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -366,8 +366,10 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
* we are going to free part of that, we need to make that
* writeable and non-executable first.
*/
- set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
- set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
+ if (kernel_set_to_readonly) {
+ set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
+ set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
+ }

printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 71a5929..3840ecf 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -788,6 +788,7 @@ void mark_rodata_ro(void)
unsigned long rodata_start =
((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
unsigned long end = (unsigned long) &__end_rodata_hpage_align;
+ unsigned long kernel_end = (((unsigned long)&__init_end + HPAGE_SIZE) & HPAGE_MASK);
unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);
unsigned long data_start = (unsigned long) &_sdata;
@@ -798,11 +799,14 @@ void mark_rodata_ro(void)

kernel_set_to_readonly = 1;

+ /* Make the low kernel mapping NX */
+ set_memory_nx(PAGE_OFFSET, (PMD_PAGE_SIZE*PTRS_PER_PMD) >> PAGE_SHIFT);
+
/*
* The rodata section (but not the kernel text!) should also be
* not-executable.
*/
- set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);
+ set_memory_nx(rodata_start, (kernel_end - rodata_start) >> PAGE_SHIFT);

rodata_test();

--
1.7.2.3

