[PATCH v2 1/3] xen/pv: allow pmu msr accesses to cause GP

From: Juergen Gross
Date: Tue Oct 04 2022 - 04:44:28 EST


Today pmu_msr_read() and pmu_msr_write() fall back to the safe variants
of the native MSR read/write functions when the MSR access isn't
emulated via Xen. Allow the caller to select the potentially faulting
variant instead by passing NULL for the error pointer.

Restructure the code to make it more readable.
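
For example (illustrative usage only; msr, val and err are the
caller's locals):

	/* Safe variant: a failed native access is reported via err. */
	pmu_msr_read(msr, &val, &err);

	/* Faulting variant: a non-emulated access may raise #GP. */
	pmu_msr_read(msr, &val, NULL);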

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V2:
- do some restructuring (Jan Beulich, Boris Ostrovsky)
---
arch/x86/xen/pmu.c | 61 +++++++++++++++++++++++++---------------------
1 file changed, 33 insertions(+), 28 deletions(-)

diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 21ecbe754cb2..501b6f872d96 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -131,6 +131,9 @@ static inline uint32_t get_fam15h_addr(u32 addr)
 
 static inline bool is_amd_pmu_msr(unsigned int msr)
 {
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		return false;
+
 	if ((msr >= MSR_F15H_PERF_CTL &&
 	     msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) ||
 	    (msr >= MSR_K7_EVNTSEL0 &&
@@ -144,6 +147,9 @@ static int is_intel_pmu_msr(u32 msr_index, int *type, int *index)
 {
 	u32 msr_index_pmc;
 
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return false;
+
 	switch (msr_index) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 	case MSR_IA32_DS_AREA:
@@ -292,46 +298,45 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
 
 bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
 {
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
-		if (is_amd_pmu_msr(msr)) {
-			if (!xen_amd_pmu_emulate(msr, val, 1))
-				*val = native_read_msr_safe(msr, err);
-			return true;
-		}
-	} else {
-		int type, index;
+	int type, index;
+	bool emulated;
 
-		if (is_intel_pmu_msr(msr, &type, &index)) {
-			if (!xen_intel_pmu_emulate(msr, val, type, index, 1))
-				*val = native_read_msr_safe(msr, err);
-			return true;
-		}
+	if (is_amd_pmu_msr(msr))
+		emulated = xen_amd_pmu_emulate(msr, val, 1);
+	else if (is_intel_pmu_msr(msr, &type, &index))
+		emulated = xen_intel_pmu_emulate(msr, val, type, index, 1);
+	else
+		return false;
+
+	if (!emulated) {
+		*val = err ? native_read_msr_safe(msr, err)
+			   : native_read_msr(msr);
 	}
 
-	return false;
+	return true;
 }
 
 bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
 {
 	uint64_t val = ((uint64_t)high << 32) | low;
+	int type, index;
+	bool emulated;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
-		if (is_amd_pmu_msr(msr)) {
-			if (!xen_amd_pmu_emulate(msr, &val, 0))
-				*err = native_write_msr_safe(msr, low, high);
-			return true;
-		}
-	} else {
-		int type, index;
+	if (is_amd_pmu_msr(msr))
+		emulated = xen_amd_pmu_emulate(msr, &val, 0);
+	else if (is_intel_pmu_msr(msr, &type, &index))
+		emulated = xen_intel_pmu_emulate(msr, &val, type, index, 0);
+	else
+		return false;
 
-		if (is_intel_pmu_msr(msr, &type, &index)) {
-			if (!xen_intel_pmu_emulate(msr, &val, type, index, 0))
-				*err = native_write_msr_safe(msr, low, high);
-			return true;
-		}
+	if (!emulated) {
+		if (err)
+			*err = native_write_msr_safe(msr, low, high);
+		else
+			native_write_msr(msr, low, high);
 	}
 
-	return false;
+	return true;
 }
 
 static unsigned long long xen_amd_read_pmc(int counter)
--
2.35.3
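
The write side follows the same convention. A usage sketch (msr, low,
high and err are the caller's locals):

	/* Safe variant: a failed native write is reported via err. */
	pmu_msr_write(msr, low, high, &err);

	/* Faulting variant: a non-emulated write may raise #GP. */
	pmu_msr_write(msr, low, high, NULL);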