[PATCH 2/4] x86: Prepare vm86 tasks to handle User-Mode Instruction Prevention

From: Ricardo Neri
Date: Tue Nov 08 2016 - 01:14:01 EST


User-Mode Instruction Prevention (UMIP) is a security feature in new Intel
processors that causes a general protection exception if certain
instructions are executed in user mode (CPL > 0).
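
For illustration only (not part of this patch): a user-space program can
observe UMIP directly. Under UMIP, the SGDT below raises #GP, which the
kernel delivers to the process as SIGSEGV. A minimal sketch, assuming an
x86 Linux user space and no kernel-side emulation of the instruction:

/* umip_test.c: hypothetical demo, not included in this series */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void sigsegv_handler(int sig)
{
	puts("SGDT faulted: UMIP is enforced");
	exit(0);
}

int main(void)
{
	/* 2-byte limit + 8-byte base (64-bit); large enough for 32-bit too */
	unsigned char gdt_desc[10];

	signal(SIGSEGV, sigsegv_handler);

	/* Store the GDT register; #GP happens here if CR4.UMIP is set */
	asm volatile ("sgdt %0" : "=m" (gdt_desc));

	puts("SGDT succeeded: UMIP is not enforced");
	return 0;
}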

Unfortunately, some of the instructions protected by UMIP (namely SGDT,
SIDT and SMSW) are used by certain applications running in virtual-8086
mode (e.g., DOSEMU and Wine). Thus, UMIP needs to be disabled in
virtual-8086 tasks for such applications to run correctly. However,
unconditionally disabling UMIP for virtual-8086 tasks could be abused
by malicious applications. Hence, UMIP is only disabled for this
particular kind of task if requested at boot time via
vm86_disable_x86_umip().

If disabling UMIP is allowed, it is done in two code paths: 1) when
entering virtual-8086 mode via a system call, and 2) on a task switch.
For the task-switch case, a new member is added to struct vm86 to keep
track of the UMIP-disabling selection; it is set in the vm86 system
call as per the selection made at boot time.

If supported by the CPU, UMIP is re-enabled as soon as we exit
virtual-8086 mode via an interrupt/exception or a task switch. To
determine whether we are switching to a virtual-8086 mode task, we rely
on the fact that virtual-8086 mode tasks keep a copy of the supervisor
mode stack pointer (saved_sp0) taken prior to entering virtual-8086
mode.

Since the X86_INTEL_UMIP config option is not defined yet, this code
remains dormant until that option is enabled in a subsequent patch.
That patch will also introduce code to disable UMIP for virtual-8086
tasks via a kernel parameter.
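
For reference, one plausible shape for that kernel-parameter hook is
sketched below; the parameter name and the __setup() wiring are
illustrative assumptions only, not the contents of the follow-up patch:

#include <linux/init.h>

/*
 * Hypothetical sketch: call vm86_disable_x86_umip() (added by this
 * patch) when a boot parameter is present. The name "vm86_umip_off"
 * is a placeholder, not the name used by this series.
 */
static int __init parse_vm86_umip_off(char *arg)
{
	vm86_disable_x86_umip();
	return 1;
}
__setup("vm86_umip_off", parse_vm86_umip_off);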

Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Chen Yucong <slaoub@xxxxxxxxx>
Cc: Chris Metcalf <cmetcalf@xxxxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Fenghua Yu <fenghua.yu@xxxxxxxxx>
Cc: Huang Rui <ray.huang@xxxxxxx>
Cc: Jiri Slaby <jslaby@xxxxxxx>
Cc: Jonathan Corbet <corbet@xxxxxxx>
Cc: Michael S. Tsirkin <mst@xxxxxxxxxx>
Cc: Paul Gortmaker <paul.gortmaker@xxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Ravi V. Shankar <ravi.v.shankar@xxxxxxxxx>
Cc: Shuah Khan <shuah@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@xxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/vm86.h |  3 +++
 arch/x86/kernel/process.c   |  8 ++++++++
 arch/x86/kernel/vm86_32.c   | 18 ++++++++++++++++++
 3 files changed, 29 insertions(+)

diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 1e491f3..bd14cbc 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -40,6 +40,7 @@ struct vm86 {
 	struct revectored_struct int_revectored;
 	struct revectored_struct int21_revectored;
 	struct vm86plus_info_struct vm86plus;
+	bool disable_x86_umip;
 };
 
 #ifdef CONFIG_VM86
@@ -47,6 +48,7 @@ struct vm86 {
 void handle_vm86_fault(struct kernel_vm86_regs *, long);
 int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
 void save_v86_state(struct kernel_vm86_regs *, int);
+void __init vm86_disable_x86_umip(void);
 
 struct task_struct;
 
@@ -76,6 +78,7 @@ void release_vm86_irqs(struct task_struct *);
 
 #define handle_vm86_fault(a, b)
 #define release_vm86_irqs(a)
+#define vm86_disable_x86_umip()
 
 static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
 {
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 0888a87..32b7301 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -233,6 +233,14 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		 */
 		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
 	}
+
+#if defined(CONFIG_VM86) && defined(CONFIG_X86_INTEL_UMIP)
+	if (next->vm86 && next->vm86->saved_sp0 && next->vm86->disable_x86_umip)
+		cr4_clear_bits(X86_CR4_UMIP);
+	else if (static_cpu_has(X86_FEATURE_UMIP))
+		cr4_set_bits(X86_CR4_UMIP);
+#endif
+
 	propagate_user_return_notify(prev_p, next_p);
 }

diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 01f30e5..7fd22e7 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -90,6 +90,14 @@
 #define SAFE_MASK (0xDD5)
 #define RETURN_MASK (0xDFF)
 
+static bool disable_x86_umip;
+
+void __init vm86_disable_x86_umip(void)
+{
+	if (cpu_feature_enabled(X86_FEATURE_UMIP))
+		disable_x86_umip = true;
+}
+
 void save_v86_state(struct kernel_vm86_regs *regs, int retval)
 {
 	struct tss_struct *tss;
@@ -156,6 +164,10 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
 	lazy_load_gs(vm86->regs32.gs);
 
 	regs->pt.ax = retval;
+
+	/* Re-enable UMIP if the CPU supports it */
+	if (cpu_feature_enabled(X86_FEATURE_UMIP))
+		cr4_set_bits(X86_CR4_UMIP);
 }
 
 static void mark_screen_rdonly(struct mm_struct *mm)
@@ -371,6 +383,12 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
 	if (vm86->flags & VM86_SCREEN_BITMAP)
 		mark_screen_rdonly(tsk->mm);
 
+	if (cpu_feature_enabled(X86_FEATURE_UMIP)) {
+		vm86->disable_x86_umip = disable_x86_umip;
+		if (disable_x86_umip)
+			cr4_clear_bits(X86_CR4_UMIP);
+	}
+
 	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
 	force_iret();
 	return regs->ax;
--
2.7.4