[PATCH RFC 2/2] KVM: x86/vPMU: ignore access to LBR-related MSRs

From: Jan Dakinevich
Date: Wed Dec 06 2017 - 06:47:22 EST


Windows Server 2016 Essentials attempts (for a yet unknown reason) to
access MSR_LBR_TOS and other LBR-related registers at startup. These
are not currently handled by KVM, so the guest takes a #GP and crashes.

To prevent that, identify the LBR-related MSRs pertinent to the CPU
model exposed to the guest and handle them as dummies: ignore writes
and return zero on reads.

Signed-off-by: Jan Dakinevich <jan.dakinevich@xxxxxxxxxxxxx>
---
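
(Not part of the patch -- a standalone userspace sketch of the range
check, for reference. The MSR numbers below assume the Skylake-era
LBR layout used by the host driver: 32 FROM/TO pairs starting at
MSR_LBR_NHM_FROM/MSR_LBR_NHM_TO plus MSR_LBR_TOS. The values that
intel_pmu_lbr_fill() from patch 1/2 picks for a given family/model
may differ.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Values from arch/x86/include/asm/msr-index.h */
#define MSR_LBR_TOS      0x000001c9
#define MSR_LBR_NHM_FROM 0x00000680
#define MSR_LBR_NHM_TO   0x000006c0

struct x86_pmu_lbr {
	unsigned int nr;	/* number of LBR stack entries */
	uint32_t tos, from, to;	/* base MSRs of the three ranges */
};

/* Same range check as intel_is_lbr_msr() in the patch below. */
static bool is_lbr_msr(const struct x86_pmu_lbr *lbr, uint32_t msr)
{
	if (!lbr->nr)
		return false;
	if (msr == lbr->tos)
		return true;
	if (msr >= lbr->from && msr < lbr->from + lbr->nr)
		return true;
	if (msr >= lbr->to && msr < lbr->to + lbr->nr)
		return true;
	return false;
}

int main(void)
{
	/* Hypothetical Skylake-class guest: 32-entry LBR stack. */
	struct x86_pmu_lbr lbr = {
		.nr = 32, .tos = MSR_LBR_TOS,
		.from = MSR_LBR_NHM_FROM, .to = MSR_LBR_NHM_TO,
	};

	printf("%d\n", is_lbr_msr(&lbr, MSR_LBR_TOS));          /* 1 */
	printf("%d\n", is_lbr_msr(&lbr, MSR_LBR_NHM_FROM + 5)); /* 1 */
	printf("%d\n", is_lbr_msr(&lbr, MSR_LBR_NHM_TO + 31));  /* 1 */
	printf("%d\n", is_lbr_msr(&lbr, MSR_LBR_NHM_TO + 32));  /* 0: past the range */
	return 0;
}
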
arch/x86/include/asm/kvm_host.h | 2 ++
arch/x86/kvm/pmu_intel.c | 33 +++++++++++++++++++++++++++++++++
2 files changed, 35 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 977de5f..04324dd 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -34,6 +34,7 @@
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
+#include <asm/perf_event.h>

#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
@@ -416,6 +417,7 @@ struct kvm_pmu {
struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
struct irq_work irq_work;
u64 reprogram_pmi;
+ struct x86_pmu_lbr lbr;
};

struct kvm_pmu_ops;
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index 5ab4a36..7edf191 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -142,6 +142,24 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
return &counters[idx];
}

+static bool intel_is_lbr_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct x86_pmu_lbr *lbr = &pmu->lbr;
+
+ if (!lbr->nr)
+ return false;
+
+ if (msr == lbr->tos)
+ return true;
+ if (msr >= lbr->from && msr < lbr->from + lbr->nr)
+ return true;
+ if (msr >= lbr->to && msr < lbr->to + lbr->nr)
+ return true;
+
+ return false;
+}
+
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -155,6 +173,10 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
ret = pmu->version > 1;
break;
default:
+ if (intel_is_lbr_msr(vcpu, msr)) {
+ ret = true;
+ break;
+ }
ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
get_fixed_pmc(pmu, msr);
@@ -183,6 +205,10 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
*data = pmu->global_ovf_ctrl;
return 0;
default:
+ if (intel_is_lbr_msr(vcpu, msr)) {
+ *data = 0;
+ return 0;
+ }
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
(pmc = get_fixed_pmc(pmu, msr))) {
*data = pmc_read_counter(pmc);
@@ -235,6 +261,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
break;
default:
+ if (intel_is_lbr_msr(vcpu, msr))
+ return 0;
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
(pmc = get_fixed_pmc(pmu, msr))) {
if (!msr_info->host_initiated)
@@ -303,6 +331,11 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
(boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
(entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+
+ entry = kvm_find_cpuid_entry(vcpu, 1, 0);
+ if (entry)
+ intel_pmu_lbr_fill(&pmu->lbr,
+ x86_family(entry->eax), x86_model(entry->eax));
}

static void intel_pmu_init(struct kvm_vcpu *vcpu)
--
2.1.4