Re: [PATCH] KVM: x86: Extend Spectre-v1 mitigation

From: Sean Christopherson
Date: Fri Nov 22 2019 - 14:45:48 EST


On Fri, Nov 22, 2019 at 10:40:39AM -0800, Marios Pomonis wrote:
> From: Nick Finco <nifi@xxxxxxxxxx>
>
> This extends the Spectre-v1 mitigation introduced in
> commit 75f139aaf896 ("KVM: x86: Add memory barrier on vmcs field lookup")
> and commit 085331dfc6bb ("x86/kvm: Update spectre-v1 mitigation") in light
> of the Spectre-v1/L1TF combination described here:
> https://xenbits.xen.org/xsa/advisory-289.html
>
> As reported in the link, an attacker can use the cache-load part of a
> Spectre-v1 gadget to bring memory into the L1 cache, then use L1TF to
> leak the loaded memory. Note that this attack is not fully mitigated by
> core scheduling; an attacker could employ L1TF on the same thread that
> loaded the memory into L1 instead of relying on a neighboring hyperthread.
>
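
For readers following along: the "half gadget" described above is roughly
the following pattern (an illustrative sketch with made-up names, not code
from this patch):

	static u64 table[16];

	u64 half_gadget(unsigned long idx)
	{
		/*
		 * The CPU can speculate past this bounds check with an
		 * out-of-bounds idx; the dependent load below then pulls
		 * attacker-chosen memory into L1, where L1TF can leak it
		 * from the same thread.  Unlike a full Spectre-v1 gadget,
		 * no second load is needed to transmit the value.
		 */
		if (idx < ARRAY_SIZE(table))
			return table[idx];
		return 0;
	}
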
> This patch uses array_index_nospec() to prevent index computations from
> causing speculative loads into the L1 cache. These cases involve a
> bounds check followed by a memory read using the index; this is more
> common than the full Spectre-v1 pattern. In some cases, the index
> computation can be eliminated entirely by small amounts of refactoring.
>
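
And the fix pattern is the standard one from <linux/nospec.h>: clamp the
index under speculation before the dependent load (again an illustrative
sketch, same made-up names):

	u64 fixed_gadget(unsigned long idx)
	{
		if (idx >= ARRAY_SIZE(table))
			return 0;
		/*
		 * array_index_nospec() clamps idx to [0, ARRAY_SIZE(table))
		 * without a branch (it returns 0 if idx is out of bounds),
		 * so the load below stays in bounds even when the check
		 * above is mispredicted.
		 */
		idx = array_index_nospec(idx, ARRAY_SIZE(table));
		return table[idx];
	}
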
> Signed-off-by: Nick Finco <nifi@xxxxxxxxxx>
> Signed-off-by: Marios Pomonis <pomonis@xxxxxxxxxx>

+cc stable?

> Acked-by: Andrew Honig <ahonig@xxxxxxxxxx>
> ---
>  arch/x86/kvm/emulate.c       | 11 ++++++++---
>  arch/x86/kvm/hyperv.c        | 10 ++++++----
>  arch/x86/kvm/i8259.c         |  6 +++++-
>  arch/x86/kvm/ioapic.c        | 15 +++++++++------
>  arch/x86/kvm/lapic.c         | 13 +++++++++----
>  arch/x86/kvm/mtrr.c          |  8 ++++++--
>  arch/x86/kvm/pmu.h           | 18 ++++++++++++++----
>  arch/x86/kvm/vmx/pmu_intel.c | 24 ++++++++++++++++--------
>  arch/x86/kvm/vmx/vmx.c       | 22 ++++++++++++++++------
>  arch/x86/kvm/x86.c           | 18 ++++++++++++++----
>  10 files changed, 103 insertions(+), 42 deletions(-)

Can you split this up into multiple patches? I assume this needs to be
backported to stable, and throwing everything into a single patch will
make the process unnecessarily painful. Reviewing things would also be
a lot easier.

...

> @@ -5828,6 +5836,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	u32 exit_reason = vmx->exit_reason;
> +	u32 bounded_exit_reason = array_index_nospec(exit_reason,
> +						     kvm_vmx_max_exit_handlers);
>  	u32 vectoring_info = vmx->idt_vectoring_info;
>
>  	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
> @@ -5911,7 +5921,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
>  	}
>
>  	if (exit_reason < kvm_vmx_max_exit_handlers
> -	    && kvm_vmx_exit_handlers[exit_reason]) {
> +	    && kvm_vmx_exit_handlers[bounded_exit_reason]) {
>  #ifdef CONFIG_RETPOLINE
>  		if (exit_reason == EXIT_REASON_MSR_WRITE)
>  			return kvm_emulate_wrmsr(vcpu);
> @@ -5926,7 +5936,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
>  		else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
>  			return handle_ept_misconfig(vcpu);
>  #endif
> -		return kvm_vmx_exit_handlers[exit_reason](vcpu);
> +		return kvm_vmx_exit_handlers[bounded_exit_reason](vcpu);
>  	} else {
>  		vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
>  			    exit_reason);

Oof, using exit_reason for the comparison is subtle. Rather than
precompute the bounded exit reason, what about refactoring the code so
that the array_index_nospec() tomfoolery can be done after the actual
bounds check? This would also avoid the nospec stuff when using the
direct retpoline handling.

---
arch/x86/kvm/vmx/vmx.c | 56 ++++++++++++++++++++++--------------------
1 file changed, 30 insertions(+), 26 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d39475e2d44e..14c2efd66300 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5910,34 +5910,38 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 		}
 	}

-	if (exit_reason < kvm_vmx_max_exit_handlers
-	    && kvm_vmx_exit_handlers[exit_reason]) {
+	if (exit_reason >= kvm_vmx_max_exit_handlers)
+		goto unexpected_vmexit;
+
 #ifdef CONFIG_RETPOLINE
-		if (exit_reason == EXIT_REASON_MSR_WRITE)
-			return kvm_emulate_wrmsr(vcpu);
-		else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
-			return handle_preemption_timer(vcpu);
-		else if (exit_reason == EXIT_REASON_PENDING_INTERRUPT)
-			return handle_interrupt_window(vcpu);
-		else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
-			return handle_external_interrupt(vcpu);
-		else if (exit_reason == EXIT_REASON_HLT)
-			return kvm_emulate_halt(vcpu);
-		else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
-			return handle_ept_misconfig(vcpu);
+	if (exit_reason == EXIT_REASON_MSR_WRITE)
+		return kvm_emulate_wrmsr(vcpu);
+	else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
+		return handle_preemption_timer(vcpu);
+	else if (exit_reason == EXIT_REASON_PENDING_INTERRUPT)
+		return handle_interrupt_window(vcpu);
+	else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
+		return handle_external_interrupt(vcpu);
+	else if (exit_reason == EXIT_REASON_HLT)
+		return kvm_emulate_halt(vcpu);
+	else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
+		return handle_ept_misconfig(vcpu);
 #endif
-		return kvm_vmx_exit_handlers[exit_reason](vcpu);
-	} else {
-		vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
-			    exit_reason);
-		dump_vmcs();
-		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-		vcpu->run->internal.suberror =
-			KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
-		vcpu->run->internal.ndata = 1;
-		vcpu->run->internal.data[0] = exit_reason;
-		return 0;
-	}
+
+	exit_reason = array_index_nospec(exit_reason, kvm_vmx_max_exit_handlers);
+	if (!kvm_vmx_exit_handlers[exit_reason])
+		goto unexpected_vmexit;
+
+	return kvm_vmx_exit_handlers[exit_reason](vcpu);
+
+unexpected_vmexit:
+	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+	dump_vmcs();
+	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
+	vcpu->run->internal.ndata = 1;
+	vcpu->run->internal.data[0] = exit_reason;
+	return 0;
 }

 /*
--
2.24.0