[PATCH v3 05/11] KVM: selftests: Test consistency of CPUID with num of gp counters

From: Jinrong Liang
Date: Mon Aug 14 2023 - 07:52:38 EST


From: Jinrong Liang <cloudliang@xxxxxxxxxxx>

Add a test to check whether non-existent counters can be accessed in the
guest after determining the number of Intel generic performance counters
via CPUID. When the number of counters is less than 3, KVM does not
emulate #GP if a counter isn't present, due to its compatibility handling
of MSR_P6_PERFCTRx. Nor will KVM emulate more counters than it can
support.

Co-developed-by: Like Xu <likexu@xxxxxxxxxxx>
Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
Signed-off-by: Jinrong Liang <cloudliang@xxxxxxxxxxx>
---
.../kvm/x86_64/pmu_basic_functionality_test.c | 78 +++++++++++++++++++
1 file changed, 78 insertions(+)
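
For reference, the guest-side check in this patch boils down to probing a
counter MSR with a fault-safe write and read, then reporting either #GP or
the value read back. A minimal sketch of that probe, assuming the KVM
selftests harness (wrmsr_safe(), rdmsr_safe() and GP_VECTOR come from the
selftests' processor.h; the helper name below is illustrative only, not
part of the patch):

#include "processor.h"	/* wrmsr_safe(), rdmsr_safe(), GP_VECTOR */

/*
 * Illustrative helper: probe one counter MSR. Returns GP_VECTOR if either
 * the write or the read faults, otherwise the value read back (0 if KVM
 * silently drops the write, the written value if the counter is emulated).
 */
static uint64_t probe_counter_msr(uint32_t msr)
{
	uint64_t val;

	if (wrmsr_safe(msr, 0xffff) == GP_VECTOR ||
	    rdmsr_safe(msr, &val) == GP_VECTOR)
		return GP_VECTOR;

	return val;
}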

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
index daa45aa285bb..b86033e51d5c 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
@@ -16,6 +16,11 @@
/* Guest payload for any performance counter counting */
#define NUM_BRANCHES 10

+static const uint64_t perf_caps[] = {
+	0,
+	PMU_CAP_FW_WRITES,
+};
+
static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
void *guest_code)
{
@@ -164,6 +169,78 @@ static void intel_test_arch_events(void)
}
}

+static void guest_wr_and_rd_msrs(uint32_t base, uint8_t begin, uint8_t offset)
+{
+	uint8_t wr_vector, rd_vector;
+	uint64_t msr_val;
+	unsigned int i;
+
+	for (i = begin; i < begin + offset; i++) {
+		wr_vector = wrmsr_safe(base + i, 0xffff);
+		rd_vector = rdmsr_safe(base + i, &msr_val);
+		if (wr_vector == GP_VECTOR || rd_vector == GP_VECTOR)
+			GUEST_SYNC(GP_VECTOR);
+		else
+			GUEST_SYNC(msr_val);
+	}
+
+	GUEST_DONE();
+}
+
+/* Access out-of-range counter MSR(s); expect #GP or a dropped write */
+static void test_oob_gp_counter(uint8_t eax_gp_num, uint8_t offset,
+				uint64_t perf_cap, uint64_t expected)
+{
+	uint32_t ctr_msr = MSR_IA32_PERFCTR0;
+	struct kvm_vcpu *vcpu;
+	uint64_t msr_val = 0;
+	struct kvm_vm *vm;
+
+	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_wr_and_rd_msrs);
+
+	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_GP_COUNTERS,
+				eax_gp_num);
+
+	if (perf_cap & PMU_CAP_FW_WRITES)
+		ctr_msr = MSR_IA32_PMC0;
+
+	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, perf_cap);
+	vcpu_args_set(vcpu, 3, ctr_msr, eax_gp_num, offset);
+	while (run_vcpu(vcpu, &msr_val) != UCALL_DONE)
+		TEST_ASSERT_EQ(expected, msr_val);
+
+	kvm_vm_free(vm);
+}
+
+static void intel_test_counters_num(void)
+{
+	uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
+	unsigned int i;
+
+	TEST_REQUIRE(nr_gp_counters > 2);
+
+	for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
+		/*
+		 * For compatibility reasons, KVM does not emulate #GP when
+		 * MSR_P6_PERFCTR[0|1] is not present, but this does not
+		 * affect checking for the presence of MSR_IA32_PMCx via #GP.
+		 */
+		if (perf_caps[i] & PMU_CAP_FW_WRITES)
+			test_oob_gp_counter(0, 1, perf_caps[i], GP_VECTOR);
+
+		test_oob_gp_counter(2, 1, perf_caps[i], GP_VECTOR);
+		test_oob_gp_counter(nr_gp_counters, 1, perf_caps[i], GP_VECTOR);
+
+		/* KVM doesn't emulate more counters than it can support. */
+		test_oob_gp_counter(nr_gp_counters + 1, 1, perf_caps[i],
+				    GP_VECTOR);
+
+		/* Test that KVM drops writes to MSR_P6_PERFCTR[0|1]. */
+		if (!perf_caps[i])
+			test_oob_gp_counter(0, 2, perf_caps[i], 0);
+	}
+}
+
int main(int argc, char *argv[])
{
TEST_REQUIRE(get_kvm_param_bool("enable_pmu"));
@@ -174,6 +251,7 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));

intel_test_arch_events();
+	intel_test_counters_num();

return 0;
}
--
2.39.3