[PATCH 5.4 20/66] x86/entry/32: Use %ss segment where required

From: Greg Kroah-Hartman
Date: Wed Nov 27 2019 - 16:13:19 EST


From: Andy Lutomirski <luto@xxxxxxxxxx>

commit 4c4fd55d3d59a41ddfa6ecba7e76928921759f43 upstream.

When re-building the IRET frame we use %eax as a destination %esp;
make sure to then also match the segment, since there may be a nonzero
SS base (ESPFIX).
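
For background, an illustrative sketch (not part of the patch below): on
x86-32 a memory operand based on %eax defaults to the %ds segment, while
%esp-based accesses implicitly use %ss. With ESPFIX the SS base is
nonzero, so the same numeric offset names different linear addresses
depending on the segment used, and an explicit %ss: override is needed
whenever the stack is addressed through %eax:

	movl	%esp, %eax		# %eax now holds a stack offset
	movl	(%eax), %ecx		# default %ds (base 0): wrong data under ESPFIX
	movl	%ss:(%eax), %ecx	# explicit %ss override: same base as the stack
	movl	(%esp), %ecx		# %esp-based access uses %ss implicitly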

[peterz: Changelog and minor edits]
Fixes: 3c88c692c287 ("x86/stackframe/32: Provide consistent pt_regs")
Signed-off-by: Andy Lutomirski <luto@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: stable@xxxxxxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
arch/x86/entry/entry_32.S | 19 ++++++++++++++-----
1 file changed, 14 insertions(+), 5 deletions(-)

--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -210,6 +210,8 @@
/*
* The high bits of the CS dword (__csh) are used for CS_FROM_*.
* Clear them in case hardware didn't do this for us.
+ *
+ * Be careful: we may have nonzero SS base due to ESPFIX.
*/
andl $0x0000ffff, 3*4(%esp)

@@ -263,6 +265,13 @@
.endm

.macro IRET_FRAME
+ /*
+	 * We're called with %ds, %es, %fs, and %gs from the interrupted
+	 * frame, so we shouldn't use them. Also, we may be in ESPFIX
+	 * mode and therefore have a nonzero SS base and an offset ESP,
+	 * so any attempt to access the stack needs to use SS. (except for
+	 * accesses through %esp, which automatically use SS.)
+	 */
testl $CS_FROM_KERNEL, 1*4(%esp)
jz .Lfinished_frame_\@

@@ -276,20 +285,20 @@
movl 5*4(%esp), %eax # (modified) regs->sp

movl 4*4(%esp), %ecx # flags
- movl %ecx, -4(%eax)
+ movl %ecx, %ss:-1*4(%eax)

movl 3*4(%esp), %ecx # cs
andl $0x0000ffff, %ecx
- movl %ecx, -8(%eax)
+ movl %ecx, %ss:-2*4(%eax)

movl 2*4(%esp), %ecx # ip
- movl %ecx, -12(%eax)
+ movl %ecx, %ss:-3*4(%eax)

movl 1*4(%esp), %ecx # eax
- movl %ecx, -16(%eax)
+ movl %ecx, %ss:-4*4(%eax)

popl %ecx
- lea -16(%eax), %esp
+ lea -4*4(%eax), %esp
popl %eax
.Lfinished_frame_\@:
.endm
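
For reference, a sketch of the frame the hunk above builds (derived from
the patched code, not part of the patch): the four stores lay out a new
frame immediately below the (modified) regs->sp via %eax, which is why
each of them needs the explicit %ss: override, while the final pops go
through %esp and pick up %ss implicitly.

	# regs->sp - 1*4: flags
	# regs->sp - 2*4: cs
	# regs->sp - 3*4: ip
	# regs->sp - 4*4: eax	<- %esp after "lea -4*4(%eax), %esp"
	#
	# "popl %eax" then restores eax and leaves ip/cs/flags on top of
	# the stack for the subsequent iret.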