Re: [PATCH 15/24] x86/mm: Allow flushing for future ASID switches

From: Peter Zijlstra
Date: Tue Nov 28 2017 - 15:46:05 EST


On Tue, Nov 28, 2017 at 12:34:17PM -0800, Andy Lutomirski wrote:
> I think it should be fine. A very old version of the patches had that
> problem, but, in -tip, the nmi RESTORE_CR3 is in the fancy
> recursion-protected region, and the stack is okay. The idea is that
> we're already on the old (possibly user) CR3 before we do the crazy
> recursion-checking bits. But that's fine, since all that's accessed
> there is the IST stack, and that's in the cpu_entry_area and thus safe
> regardless of CR3.

Turns out there's a gob of spare registers to be had around RESTORE_CR3;
we do POP_EXTRA_REGS right after both call sites, so I just picked one
from there.
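
For anyone who'd rather see the idea in C before reading the asm: this is
roughly what the pending-flush bookkeeping below boils down to.
flush_user_asid() and __asid_flush are the names used in the patch;
everything else (user_cr3(), the constant names) is made up for the
sketch, and it's not kernel code, just the shape of it:

#include <stdint.h>

#define CR3_ASID_MASK	0x7FFUL		/* low ASID bits; bit 11 selects the USER half */
#define CR3_NOFLUSH	(1UL << 63)	/* PCID: don't flush on this CR3 write */

/* per-cpu in the patch; one bit per ASID that still needs a flush */
static unsigned long __asid_flush;

/* what flush_user_asid() does: defer the flush instead of doing it now */
static void flush_user_asid(uint16_t asid)
{
	/* only a handful of dynamic ASIDs are used, so asid fits in one long */
	__asid_flush |= 1UL << (asid & CR3_ASID_MASK);
}

/* what SWITCH_TO_USER_CR3 / RESTORE_CR3 decide before writing CR3 */
static uint64_t user_cr3(uint64_t cr3)
{
	unsigned long asid = cr3 & CR3_ASID_MASK;

	if (__asid_flush & (1UL << asid))
		__asid_flush &= ~(1UL << asid);	/* flushing CR3 write */
	else
		cr3 |= CR3_NOFLUSH;		/* TLB for this ASID is still clean */

	return cr3;	/* the asm also ORs in KAISER_SWITCH_MASK here */
}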


diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 07fa7fdd7b68..9617b7c642db 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -4,6 +4,7 @@
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
+#include <asm/percpu.h>

/*

@@ -220,15 +221,29 @@ For 32-bit we have the following conventions - kernel is built with
.macro SWITCH_TO_USER_CR3 scratch_reg:req
STATIC_JUMP_IF_FALSE .Lend_\@, kaiser_enabled_key, def=1
mov %cr3, \scratch_reg
- ADJUST_USER_CR3 \scratch_reg
+ push \scratch_reg
+ andq $(0x7FF), \scratch_reg
+ bt \scratch_reg, PER_CPU_VAR(__asid_flush)
+ jnc .Lnoflush_\@
+
+ btr \scratch_reg, PER_CPU_VAR(__asid_flush)
+ pop \scratch_reg
+ jmp .Ldo_\@
+
+.Lnoflush_\@:
+ pop \scratch_reg
+ ALTERNATIVE "", "bts $63, \scratch_reg", X86_FEATURE_PCID
+
+.Ldo_\@:
+ orq $(KAISER_SWITCH_MASK), \scratch_reg
mov \scratch_reg, %cr3
.Lend_\@:
.endm

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
STATIC_JUMP_IF_FALSE .Ldone_\@, kaiser_enabled_key, def=1
- movq %cr3, %r\scratch_reg
- movq %r\scratch_reg, \save_reg
+ movq %cr3, \scratch_reg
+ movq \scratch_reg, \save_reg
/*
* Is the "switch mask" all zero? That means that both of
* these are zero:
@@ -239,17 +254,42 @@ For 32-bit we have the following conventions - kernel is built with
*
* That indicates a kernel CR3 value, not user/shadow.
*/
- testq $(KAISER_SWITCH_MASK), %r\scratch_reg
+ testq $(KAISER_SWITCH_MASK), \scratch_reg
jz .Ldone_\@

- ADJUST_KERNEL_CR3 %r\scratch_reg
- movq %r\scratch_reg, %cr3
+ ADJUST_KERNEL_CR3 \scratch_reg
+ movq \scratch_reg, %cr3

.Ldone_\@:
.endm

-.macro RESTORE_CR3 save_reg:req
+.macro RESTORE_CR3 scratch_reg:req save_reg:req
STATIC_JUMP_IF_FALSE .Lend_\@, kaiser_enabled_key, def=1
+
+ /* ASID bit 11 is for USER */
+ bt $11, \save_reg
+ /*
+ * KERNEL pages can always resume with NOFLUSH as we do
+ * explicit flushes.
+ */
+ jnc .Lnoflush_\@
+
+ /*
+ * Check if there's a pending flush for the USER ASID we're
+ * about to set.
+ */
+ movq \save_reg, \scratch_reg
+ andq $(0x7FF), \scratch_reg
+ bt \scratch_reg, PER_CPU_VAR(__asid_flush)
+ jnc .Lnoflush_\@
+
+ btr \scratch_reg, PER_CPU_VAR(__asid_flush)
+ jmp .Ldo_\@
+
+.Lnoflush_\@:
+ ALTERNATIVE "", "bts $63, \save_reg", X86_FEATURE_PCID
+
+.Ldo_\@:
/*
* The CR3 write could be avoided when not changing its value,
* but would require a CR3 read *and* a scratch register.
@@ -266,7 +306,7 @@ For 32-bit we have the following conventions - kernel is built with
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
-.macro RESTORE_CR3 save_reg:req
+.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index df0152bee8a8..39233c58f14a 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1257,7 +1254,7 @@ ENTRY(paranoid_entry)
xorl %ebx, %ebx

1:
- SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=ax save_reg=%r14
+ SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14

ret
END(paranoid_entry)
@@ -1281,7 +1278,7 @@ ENTRY(paranoid_exit)
testl %ebx, %ebx /* swapgs needed? */
jnz .Lparanoid_exit_no_swapgs
TRACE_IRQS_IRETQ
- RESTORE_CR3 save_reg=%r14
+ RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
SWAPGS_UNSAFE_STACK
jmp .Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
@@ -1723,7 +1720,7 @@ end_repeat_nmi:
movq $-1, %rsi
call do_nmi

- RESTORE_CR3 save_reg=%r14
+ RESTORE_CR3 scratch_reg=%r15 save_reg=%r14

testl %ebx, %ebx /* swapgs needed? */
jnz nmi_restore
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 27eb7e8c5e84..1fb137da4c9f 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -9,6 +9,7 @@
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
+#include <asm/kaiser.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
unsigned long type)
@@ -347,9 +348,33 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)

extern void initialize_tlbstate_and_flush(void);

+DECLARE_PER_CPU(unsigned long, __asid_flush);
+
+/*
+ * Given an asid, flush the corresponding KAISER user ASID.
+ */
+static inline void flush_user_asid(u16 asid)
+{
+ /* There is no user ASID if KAISER is off */
+ if (!IS_ENABLED(CONFIG_KAISER))
+ return;
+ /*
+ * We only have a single ASID if PCID is off and the CR3
+ * write will have flushed it.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_PCID))
+ return;
+
+ if (!kaiser_enabled)
+ return;
+
+ __set_bit(kern_asid(asid), this_cpu_ptr(&__asid_flush));
+}
+
static inline void __native_flush_tlb(void)
{
if (!cpu_feature_enabled(X86_FEATURE_INVPCID)) {
+#if 0
/*
* native_write_cr3() only clears the current PCID if
* CR4 has X86_CR4_PCIDE set. In other words, this does
@@ -358,9 +383,10 @@ static inline void __native_flush_tlb(void)
* With KAISER and PCIDs, the means that we did not
* flush the user PCID. Warn if it gets called.
*/
- if (IS_ENABLED(CONFIG_KAISER))
- WARN_ON_ONCE(this_cpu_read(cpu_tlbstate.cr4) &
- X86_CR4_PCIDE);
+ if (IS_ENABLED(CONFIG_KAISER) && kaiser_enabled)
+ WARN_ON_ONCE(this_cpu_read(cpu_tlbstate.cr4) & X86_CR4_PCIDE);
+#endif
+ flush_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
/*
* If current->mm == NULL then we borrow a mm
* which may change during a task switch and
@@ -435,6 +461,8 @@ static inline void __native_flush_tlb_single(unsigned long addr)
* early.
*/
if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE)) {
+ flush_user_asid(loaded_mm_asid);
+
asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
return;
}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 72f115178d14..2dcd01615772 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -218,11 +218,13 @@ static void setup_pcid(void)
* INVPCID. Just avoid using PCIDs at all if we
* have KAISER and do not have INVPCID.
*/
+#if 0
if (!IS_ENABLED(CONFIG_X86_GLOBAL_PAGES) &&
- !boot_cpu_has(X86_FEATURE_INVPCID)) {
+ kaiser_enabled && !boot_cpu_has(X86_FEATURE_INVPCID)) {
setup_clear_cpu_cap(X86_FEATURE_PCID);
return;
}
+#endif
/*
* This can't be cr4_set_bits_and_update_boot() --
* the trampoline code can't handle CR4.PCIDE and
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index f75b6eb47a6d..4ed1d0dfd54f 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -100,55 +100,14 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
*need_flush = true;
}

-/*
- * Given a kernel asid, flush the corresponding KAISER
- * user ASID.
- */
-static void flush_user_asid(pgd_t *pgd, u16 kern_asid)
-{
- /* There is no user ASID if KAISER is off */
- if (!IS_ENABLED(CONFIG_KAISER))
- return;
- /*
- * We only have a single ASID if PCID is off and the CR3
- * write will have flushed it.
- */
- if (!cpu_feature_enabled(X86_FEATURE_PCID))
- return;
- /*
- * With PCIDs enabled, write_cr3() only flushes TLB
- * entries for the current (kernel) ASID. This leaves
- * old TLB entries for the user ASID in place and we must
- * flush that context separately. We can theoretically
- * delay doing this until we actually load up the
- * userspace CR3, but do it here for simplicity.
- */
- if (cpu_feature_enabled(X86_FEATURE_INVPCID)) {
- invpcid_flush_single_context(user_asid(kern_asid));
- } else {
- /*
- * On systems with PCIDs, but no INVPCID, the only
- * way to flush a PCID is a CR3 write. Note that
- * we use the kernel page tables with the *user*
- * ASID here.
- */
- unsigned long user_asid_flush_cr3;
- user_asid_flush_cr3 = build_cr3(pgd, user_asid(kern_asid));
- write_cr3(user_asid_flush_cr3);
- /*
- * We do not use PCIDs with KAISER unless we also
- * have INVPCID. Getting here is unexpected.
- */
- WARN_ON_ONCE(1);
- }
-}
+__visible DEFINE_PER_CPU(unsigned long, __asid_flush);

static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
{
unsigned long new_mm_cr3;

if (need_flush) {
- flush_user_asid(pgdir, new_asid);
+ flush_user_asid(new_asid);
new_mm_cr3 = build_cr3(pgdir, new_asid);
} else {
new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);