[patch 14/14] x86/exceptions: Enable IST guard pages

From: Thomas Gleixner
Date: Sun Mar 31 2019 - 18:10:50 EST


All usage sites which expected the exception stacks in the CPU entry
area to be mapped linearly have been fixed up. Enable guard pages
between the IST stacks.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
arch/x86/include/asm/cpu_entry_area.h | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)

--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -25,13 +25,9 @@ struct exception_stacks {
ESTACKS_MEMBERS(0)
};

-/*
- * The effective cpu entry area mapping with guard pages. Guard size is
- * zero until the code which makes assumptions about linear mapping is
- * cleaned up.
- */
+/* The effective cpu_entry_area mapping with guard pages */
struct cea_exception_stacks {
- ESTACKS_MEMBERS(0)
+ ESTACKS_MEMBERS(PAGE_SIZE)
};
#endif
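
For illustration, a minimal user-space sketch of the effect of this change.
The member names, the EXCEPTION_STKSZ value and the shape of
ESTACKS_MEMBERS() are assumptions made for the sketch and simplified from
the real header; only the two struct names and the 0 vs. PAGE_SIZE guard
size come from the patch above. Build with gcc (zero-sized arrays are a
GCC extension).

/* sketch.c - illustrative only, not the kernel's actual definitions */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE		4096
#define EXCEPTION_STKSZ		PAGE_SIZE	/* assumed stack size */

/*
 * Hypothetical, simplified member list: a guard region in front of
 * each per-vector stack plus one guard on top.
 */
#define ESTACKS_MEMBERS(guardsize)			\
	char	DF_stack_guard[guardsize];		\
	char	DF_stack[EXCEPTION_STKSZ];		\
	char	NMI_stack_guard[guardsize];		\
	char	NMI_stack[EXCEPTION_STKSZ];		\
	char	DB_stack_guard[guardsize];		\
	char	DB_stack[EXCEPTION_STKSZ];		\
	char	MCE_stack_guard[guardsize];		\
	char	MCE_stack[EXCEPTION_STKSZ];		\
	char	IST_top_guard[guardsize];

/* Physical backing store: stacks packed back to back, no guards. */
struct exception_stacks {
	ESTACKS_MEMBERS(0)
};

/* Effective cpu_entry_area mapping: a guard page before each stack. */
struct cea_exception_stacks {
	ESTACKS_MEMBERS(PAGE_SIZE)
};

int main(void)
{
	printf("backing store size:      %zu\n",
	       sizeof(struct exception_stacks));
	printf("cea mapping size:        %zu\n",
	       sizeof(struct cea_exception_stacks));
	printf("NMI stack offset in cea: %zu\n",
	       offsetof(struct cea_exception_stacks, NMI_stack));
	return 0;
}

With guardsize == PAGE_SIZE every stack in the cpu_entry_area mapping is
preceded by a page that is left unmapped, so an overflow of one IST stack
faults instead of silently corrupting the adjacent stack, while the
backing store struct stays densely packed.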