Re: [PATCH 07/39] x86/entry/32: Enter the kernel via trampoline stack

From: Brian Gerst
Date: Wed Jul 18 2018 - 14:10:08 EST


On Wed, Jul 18, 2018 at 5:41 AM Joerg Roedel <joro@xxxxxxxxxx> wrote:
>
> From: Joerg Roedel <jroedel@xxxxxxx>
>
> Use the entry-stack as a trampoline to enter the kernel. The
> entry-stack is already in the cpu_entry_area and will be
> mapped to userspace when PTI is enabled.
>
> Signed-off-by: Joerg Roedel <jroedel@xxxxxxx>
> ---
> arch/x86/entry/entry_32.S | 119 ++++++++++++++++++++++++++++++++-------
> arch/x86/include/asm/switch_to.h | 14 ++++-
> arch/x86/kernel/asm-offsets.c | 1 +
> arch/x86/kernel/cpu/common.c | 5 +-
> arch/x86/kernel/process.c | 2 -
> arch/x86/kernel/process_32.c | 2 -
> 6 files changed, 115 insertions(+), 28 deletions(-)
>
> diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
> index 7251c4f..fea49ec 100644
> --- a/arch/x86/entry/entry_32.S
> +++ b/arch/x86/entry/entry_32.S
> @@ -154,7 +154,7 @@
>
> #endif /* CONFIG_X86_32_LAZY_GS */
>
> -.macro SAVE_ALL pt_regs_ax=%eax
> +.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0
> cld
> PUSH_GS
> pushl %fs
> @@ -173,6 +173,12 @@
> movl $(__KERNEL_PERCPU), %edx
> movl %edx, %fs
> SET_KERNEL_GS %edx
> +
> + /* Switch to kernel stack if necessary */
> +.if \switch_stacks > 0
> + SWITCH_TO_KERNEL_STACK
> +.endif
> +
> .endm
>
> /*
> @@ -269,6 +275,73 @@
> .Lend_\@:
> #endif /* CONFIG_X86_ESPFIX32 */
> .endm
> +
> +
> +/*
> + * Called with pt_regs fully populated and kernel segments loaded,
> + * so we can access PER_CPU and use the integer registers.
> + *
> + * We need to be very careful here with the %esp switch, because an NMI
> + * can happen anywhere. If the NMI handler finds itself on the
> + * entry-stack, it will overwrite the task-stack and everything we
> + * copied there. So allocate the stack-frame on the task-stack and
> + * switch to it before we do any copying.
> + */
> +.macro SWITCH_TO_KERNEL_STACK
> +
> + ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
> +
> + /* Are we on the entry stack? Bail out if not! */
> + movl PER_CPU_VAR(cpu_entry_area), %ecx
> + addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
> + subl %esp, %ecx /* ecx = (end of entry_stack) - esp */
> + cmpl $SIZEOF_entry_stack, %ecx
> + jae .Lend_\@
> +
> + /* Load stack pointer into %esi and %edi */
> + movl %esp, %esi
> + movl %esi, %edi
> +
> + /* Move %edi to the top of the entry stack */
> + andl $(MASK_entry_stack), %edi
> + addl $(SIZEOF_entry_stack), %edi
> +
> + /* Load top of task-stack into %edi */
> + movl TSS_entry2task_stack(%edi), %edi
> +
> + /* Bytes to copy */
> + movl $PTREGS_SIZE, %ecx
> +
> +#ifdef CONFIG_VM86
> + testl $X86_EFLAGS_VM, PT_EFLAGS(%esi)
> + jz .Lcopy_pt_regs_\@
> +
> + /*
> + * Stack-frame contains 4 additional segment registers when
> + * coming from VM86 mode
> + */
> + addl $(4 * 4), %ecx
> +
> +.Lcopy_pt_regs_\@:
> +#endif
> +
> + /* Allocate frame on task-stack */
> + subl %ecx, %edi
> +
> + /* Switch to task-stack */
> + movl %edi, %esp
> +
> + /*
> + * We are now on the task-stack and can safely copy over the
> + * stack-frame
> + */
> + shrl $2, %ecx

This shift can be removed if you divide the constants by 4 above, so that
%ecx holds a longword count from the start instead of a byte count. Ditto
on the exit path in the next patch.
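
For reference, a rough sketch of what that would look like (untested; it
assumes the trimmed tail of this hunk does the copy with cld; rep movsl,
which is what the shrl prepares %ecx for, and it borrows %edx as scratch,
which should be free here because SAVE_ALL already saved the user value):

	/* Longwords to copy */
	movl	$(PTREGS_SIZE / 4), %ecx

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esi)
	jz	.Lcopy_pt_regs_\@

	/* Four extra segment-register slots when coming from VM86 mode */
	addl	$4, %ecx

.Lcopy_pt_regs_\@:
#endif

	/*
	 * Allocate frame on task-stack. The subl still needs a byte
	 * count, so scale the longword count through a scratch register.
	 */
	leal	(,%ecx,4), %edx
	subl	%edx, %edi

	/* Switch to task-stack */
	movl	%edi, %esp

	/* Copy the frame; %ecx is already a longword count */
	cld
	rep movsl

That keeps %ecx ready for rep movsl and confines the byte arithmetic to
the one place that still needs it.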

--
Brian Gerst