[PATCH v3 4/8] KVM: x86/pmu: Make part of the Intel v2 PMU MSRs handling x86 generic

From: Like Xu
Date: Fri Nov 11 2022 - 05:27:20 EST


From: Like Xu <likexu@xxxxxxxxxxx>

AMD's PerfMonV2 defines three registers that mirror a subset of the
Intel v2 PMU registers, namely the GLOBAL_CTRL, GLOBAL_STATUS and
GLOBAL_OVF_CTRL MSRs. For better code reuse, extract the handling of
this shared subset into generic x86 code as a straight code movement.
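
For reference, the three architectural MSRs involved, as defined in
arch/x86/include/asm/msr-index.h (the comments summarize the access
semantics the moved handlers implement):

	#define MSR_CORE_PERF_GLOBAL_STATUS	0x0000038e /* RO; W1C via OVF_CTRL */
	#define MSR_CORE_PERF_GLOBAL_CTRL	0x0000038f /* RW; per-counter enable bits */
	#define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x00000390 /* WO; reads return 0 */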

Specifically, move the kvm_pmu_set/get_msr() handlers for GLOBAL_STATUS,
GLOBAL_CTRL and GLOBAL_OVF_CTRL from the Intel code into the generic
pmu.c, and drop the .pmc_is_globally_enabled callback, which paves the
way for introducing the AMD PerfMonV2 code later.
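
Once the handlers live in generic code, a later patch in this series can
hook AMD's PerfMonV2 aliases into the same switch statement. A
hypothetical sketch of that follow-up shape, assuming the
MSR_AMD64_PERF_CNTR_GLOBAL_* names from msr-index.h (not part of this
patch):

	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_STATUS:
		/* Both vendors' STATUS MSRs map to pmu->global_status. */
		msr_info->data = pmu->global_status;
		return 0;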

The new vendor-neutral pmc_is_globally_enabled() also works for the
legacy AMD vPMU, since its version is 1 and the global_ctrl check is
therefore skipped. Note that the vendor-specific *_is_valid_msr()
callbacks continue to be used to prevent cross-vendor MSR access.
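
As a worked example of the test_bit() check on a version >= 2 PMU: with
pmu->global_ctrl = 0x700000003, GP counters 0-1 (bits 0-1) and fixed
counters 0-2 (bits 32-34, i.e. pmc->idx offset by INTEL_PMC_IDX_FIXED)
are globally enabled and everything else is not:

	u64 global_ctrl = 0x700000003ULL;

	test_bit(0,  (unsigned long *)&global_ctrl);	/* true:  GP counter 0    */
	test_bit(2,  (unsigned long *)&global_ctrl);	/* false: GP counter 2    */
	test_bit(32, (unsigned long *)&global_ctrl);	/* true:  fixed counter 0 */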

Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
---
arch/x86/include/asm/kvm-x86-pmu-ops.h |  1 -
arch/x86/kvm/pmu.c                     | 55 +++++++++++++++++++++++---
arch/x86/kvm/pmu.h                     | 17 +++++++-
arch/x86/kvm/svm/pmu.c                 |  9 -----
arch/x86/kvm/vmx/pmu_intel.c           | 46 +--------------------
5 files changed, 67 insertions(+), 61 deletions(-)

diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
index 86a3fb01e103..6c98f4bb4228 100644
--- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
+++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
@@ -13,7 +13,6 @@ BUILD_BUG_ON(1)
* at the call sites.
*/
KVM_X86_PMU_OP(hw_event_available)
-KVM_X86_PMU_OP(pmc_is_globally_enabled)
KVM_X86_PMU_OP(pmc_idx_to_pmc)
KVM_X86_PMU_OP(rdpmc_ecx_to_pmc)
KVM_X86_PMU_OP(msr_idx_to_pmc)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index e57f707fb940..a3726af5416d 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -83,11 +83,6 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
#undef __KVM_X86_PMU_OP
}

-static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
-{
- return static_call(kvm_x86_pmu_pmc_is_globally_enabled)(pmc);
-}
-
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
@@ -471,11 +466,61 @@ static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ u32 msr = msr_info->index;
+
+ switch (msr) {
+ case MSR_CORE_PERF_GLOBAL_STATUS:
+ msr_info->data = pmu->global_status;
+ return 0;
+ case MSR_CORE_PERF_GLOBAL_CTRL:
+ msr_info->data = pmu->global_ctrl;
+ return 0;
+ case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ msr_info->data = 0;
+ return 0;
+ default:
+ break;
+ }
+
return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ u32 msr = msr_info->index;
+ u64 data = msr_info->data;
+ u64 diff;
+
+ switch (msr) {
+ case MSR_CORE_PERF_GLOBAL_STATUS:
+ if (!msr_info->host_initiated)
+ return 1; /* RO MSR */
+
+ pmu->global_status = data;
+ return 0;
+ case MSR_CORE_PERF_GLOBAL_CTRL:
+ if (!kvm_valid_perf_global_ctrl(pmu, data))
+ return 1;
+
+ if (pmu->global_ctrl != data) {
+ diff = pmu->global_ctrl ^ data;
+ pmu->global_ctrl = data;
+ reprogram_counters(pmu, diff);
+ }
+ return 0;
+ case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ if (data & pmu->global_ovf_ctrl_mask)
+ return 1;
+
+ if (!msr_info->host_initiated)
+ pmu->global_status &= ~data;
+ return 0;
+ default:
+ break;
+ }
+
kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
}
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index be552c8217a0..8739e5ea2835 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -26,7 +26,6 @@ struct kvm_event_hw_type_mapping {

struct kvm_pmu_ops {
bool (*hw_event_available)(struct kvm_pmc *pmc);
- bool (*pmc_is_globally_enabled)(struct kvm_pmc *pmc);
struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
unsigned int idx, u64 *mask);
@@ -200,6 +199,22 @@ static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
}
}

+/*
+ * Check if a PMC is globally enabled by testing its bit in global_ctrl.
+ *
+ * If the current vPMU version doesn't expose a global_ctrl MSR
+ * (version < 2), all vPMCs are treated as enabled (return true).
+ */
+static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
+{
+ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+
+ if (pmu->version < 2)
+ return true;
+
+ return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
+}
+
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 7958a983b760..4e7d7e6cccec 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -76,14 +76,6 @@ static bool amd_hw_event_available(struct kvm_pmc *pmc)
return true;
}

-/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
- * AMD CPU doesn't have global_ctrl MSR, all PMCs are enabled (return TRUE).
- */
-static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
-{
- return true;
-}
-
static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -218,7 +210,6 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)

struct kvm_pmu_ops amd_pmu_ops __initdata = {
.hw_event_available = amd_hw_event_available,
- .pmc_is_globally_enabled = amd_pmc_is_enabled,
.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = amd_msr_idx_to_pmc,
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index db704eea2d7c..f95f8d1db2cf 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -90,17 +90,6 @@ static bool intel_hw_event_available(struct kvm_pmc *pmc)
return true;
}

-/* check if a PMC is enabled by comparing it with globl_ctrl bits. */
-static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
-{
- struct kvm_pmu *pmu = pmc_to_pmu(pmc);
-
- if (!intel_pmu_has_perf_global_ctrl(pmu))
- return true;
-
- return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
-}
-
static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -335,15 +324,6 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_CORE_PERF_FIXED_CTR_CTRL:
msr_info->data = pmu->fixed_ctr_ctrl;
return 0;
- case MSR_CORE_PERF_GLOBAL_STATUS:
- msr_info->data = pmu->global_status;
- return 0;
- case MSR_CORE_PERF_GLOBAL_CTRL:
- msr_info->data = pmu->global_ctrl;
- return 0;
- case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
- msr_info->data = 0;
- return 0;
case MSR_IA32_PEBS_ENABLE:
msr_info->data = pmu->pebs_enable;
return 0;
@@ -391,29 +371,6 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (pmu->fixed_ctr_ctrl != data)
reprogram_fixed_counters(pmu, data);
break;
- case MSR_CORE_PERF_GLOBAL_STATUS:
- if (!msr_info->host_initiated)
- return 1; /* RO MSR */
-
- pmu->global_status = data;
- break;
- case MSR_CORE_PERF_GLOBAL_CTRL:
- if (!kvm_valid_perf_global_ctrl(pmu, data))
- return 1;
-
- if (pmu->global_ctrl != data) {
- diff = pmu->global_ctrl ^ data;
- pmu->global_ctrl = data;
- reprogram_counters(pmu, diff);
- }
- break;
- case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
- if (data & pmu->global_ovf_ctrl_mask)
- return 1;
-
- if (!msr_info->host_initiated)
- pmu->global_status &= ~data;
- break;
case MSR_IA32_PEBS_ENABLE:
if (data & pmu->pebs_enable_mask)
return 1;
@@ -773,7 +730,7 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
pmc = intel_pmc_idx_to_pmc(pmu, bit);

if (!pmc || !pmc_speculative_in_use(pmc) ||
- !intel_pmc_is_enabled(pmc) || !pmc->perf_event)
+ !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
continue;

/*
@@ -788,7 +745,6 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)

struct kvm_pmu_ops intel_pmu_ops __initdata = {
.hw_event_available = intel_hw_event_available,
- .pmc_is_globally_enabled = intel_pmc_is_enabled,
.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = intel_msr_idx_to_pmc,
--
2.38.1