[PATCH v3 15/23] x86/signal: Check if vdso_image_32 is mapped before trying to land on it

From: Dmitry Safonov
Date: Fri Jun 11 2021 - 14:04:55 EST


Provide a current_has_vdso(image) helper and check it prior to any
attempt to land on the vdso vma.
The helper is a macro rather than a static inline function, to avoid
pulling linux/sched/task_stack.h into asm/vdso.h.
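
For illustration, a caller that previously keyed only off
current->mm->context.vdso now also verifies that the expected image is
the one actually mapped. A minimal sketch of the resulting pattern in
the sigreturn-restorer setup (mirroring the hunks below; the
&frame->retcode fallback is shown only for context and is not touched
by this patch):

	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;
	else if (current_has_vdso(&vdso_image_32))
		restorer = current->mm->context.vdso +
			vdso_image_32.sym___kernel_sigreturn;
	else
		restorer = &frame->retcode;	/* fall back to the in-frame stub */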

Signed-off-by: Dmitry Safonov <dima@xxxxxxxxxx>
---
 arch/x86/entry/common.c     | 10 +++++++++-
 arch/x86/ia32/ia32_signal.c |  4 ++--
 arch/x86/include/asm/vdso.h |  4 ++++
 arch/x86/kernel/signal.c    |  4 ++--
 4 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 7b2542b13ebd..385a1c4bf4c0 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -150,11 +150,19 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 /* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
 __visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
 {
+	unsigned long landing_pad;
+
+	if (!current_has_vdso(&vdso_image_32)) {
+		regs->ip = 0;
+		force_sigsegv(SIGSEGV);
+		syscall_exit_to_user_mode(regs);
+	}
+
 	/*
 	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
 	 * convention. Adjust regs so it looks like we entered using int80.
 	 */
-	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
+	landing_pad = (unsigned long)current->mm->context.vdso +
 		vdso_image_32.sym_int80_landing_pad;
 
 	/*
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index adb6994c40f6..2af40ae53a0e 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -255,7 +255,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
 		restorer = ksig->ka.sa.sa_restorer;
 	} else {
 		/* Return stub is in 32bit vsyscall page */
-		if (current->mm->context.vdso)
+		if (current_has_vdso(&vdso_image_32))
 			restorer = current->mm->context.vdso +
 				vdso_image_32.sym___kernel_sigreturn;
 		else
@@ -336,7 +336,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,

 	if (ksig->ka.sa.sa_flags & SA_RESTORER)
 		restorer = ksig->ka.sa.sa_restorer;
-	else if (current->mm->context.vdso)
+	else if (current_has_vdso(&vdso_image_32))
 		restorer = current->mm->context.vdso +
 			vdso_image_32.sym___kernel_rt_sigreturn;
 	else
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 98aa103eb4ab..1ea7cb3f9b14 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -45,6 +45,10 @@ extern const struct vdso_image vdso_image_x32;
 extern const struct vdso_image vdso_image_32;
 #endif
 
+#define current_has_vdso(image) \
+	(current->mm->context.vdso != 0 && \
+	 current->mm->context.vdso_image == image)
+
 extern void __init init_vdso_image(const struct vdso_image *image);
 
 extern int map_vdso_once(const struct vdso_image *image, unsigned long addr);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 988cbc634949..77496ccb812d 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -319,7 +319,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
 	unsafe_put_user(set->sig[1], &frame->extramask[0], Efault);
 	if (ksig->ka.sa.sa_flags & SA_RESTORER)
 		restorer = ksig->ka.sa.sa_restorer;
-	else if (current->mm->context.vdso)
+	else if (current_has_vdso(&vdso_image_32))
 		restorer = current->mm->context.vdso +
 			vdso_image_32.sym___kernel_sigreturn;
 	else
@@ -381,7 +381,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 	/* Set up to return from userspace. */
 	if (ksig->ka.sa.sa_flags & SA_RESTORER)
 		restorer = ksig->ka.sa.sa_restorer;
-	else if (current->mm->context.vdso)
+	else if (current_has_vdso(&vdso_image_32))
 		restorer = current->mm->context.vdso +
 			vdso_image_32.sym___kernel_rt_sigreturn;
 	else
--
2.31.1