[PATCH v3 2/4] x86/mce: Add support for Extended Physical Address MCA changes

From: Smita Koralahalli
Date: Fri Feb 11 2022 - 17:35:46 EST


Newer AMD processors such as AMD 'Milan' support more physical address
bits.

That is, the MCA_ADDR registers on Scalable MCA systems contain the
ErrorAddr in bits [56:0] instead of [55:0]. Hence, the existing LSB field
in bits [61:56] of MCA_ADDR must move elsewhere to accommodate the
larger ErrorAddr.
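
For illustration only, a minimal user-space sketch of the new masking
(the address value and LSB below are made up, and GENMASK_ULL() is
open-coded since this is not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Keep ErrorAddr bits [56:lsb]; lsb is the least significant *valid* bit. */
static uint64_t smca_mask_addr(uint64_t addr, unsigned int lsb)
{
	/* Equivalent of GENMASK_ULL(56, lsb) */
	uint64_t mask = (~0ULL >> (63 - 56)) & (~0ULL << lsb);

	return addr & mask;
}

int main(void)
{
	/* Hypothetical MCA_ADDR value with all 57 ErrorAddr bits set, LSB = 12. */
	printf("0x%016llx\n",
	       (unsigned long long)smca_mask_addr(0x1ffffffffffffffULL, 12));
	return 0;
}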

MCA_CONFIG[McaLsbInStatusSupported] indicates this change. If set, the
LSB field will be found in MCA_STATUS rather than MCA_ADDR.
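
As a rough sketch of how that bit selects the LSB source (bit positions
taken from the diff below: MCA_CONFIG bit 8, MCA_STATUS[29:24],
MCA_ADDR[61:56]; the function name and plain-value parameters are
illustrative, not actual MSR accessors):

#include <stdbool.h>
#include <stdint.h>

/* Return the least significant valid ErrorAddr bit for one bank. */
uint8_t smca_get_lsb(uint64_t mca_config, uint64_t mca_status, uint64_t mca_addr)
{
	bool lsb_in_status = mca_config & (1ULL << 8);	/* McaLsbInStatusSupported */

	if (lsb_in_status)
		return (mca_status >> 24) & 0x3f;	/* LSB in MCA_STATUS[29:24] */

	return (mca_addr >> 56) & 0x3f;			/* LSB in MCA_ADDR[61:56] */
}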

Each logical CPU has its own set of MCA banks in hardware; they are not
shared with other logical CPUs. Additionally, on SMCA systems, each
feature bit may differ from bank to bank within the same logical CPU.

Therefore, check MCA_CONFIG[McaLsbInStatusSupported] for each MCA bank on
each CPU.

Signed-off-by: Smita Koralahalli <Smita.KoralahalliChannabasappa@xxxxxxx>
Reviewed-by: Yazen Ghannam <yazen.ghannam@xxxxxxx>
---
Link:
https://lkml.kernel.org/r/20210625013341.231442-2-Smita.KoralahalliChannabasappa@xxxxxxx

v2:
Declared lsb_in_status in existing mce_bank[] struct.
Moved struct mce_bank[] declaration from core.c -> internal.h
v3:
Rebased on the latest tip tree. No functional changes.
---
arch/x86/include/asm/mce.h | 2 ++
arch/x86/kernel/cpu/mce/amd.c | 25 +++++++++++++++++++------
arch/x86/kernel/cpu/mce/core.c | 13 ++++---------
arch/x86/kernel/cpu/mce/internal.h | 14 ++++++++++++++
4 files changed, 39 insertions(+), 15 deletions(-)

diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 99a4c32cbdfa..cc67c74e8b46 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -338,6 +338,7 @@ extern int mce_threshold_remove_device(unsigned int cpu);
void mce_amd_feature_init(struct cpuinfo_x86 *c);
enum smca_bank_types smca_get_bank_type(unsigned int cpu, unsigned int bank);
void smca_extract_err_addr(struct mce *m);
+void smca_feature_init(void);
#else

static inline int mce_threshold_create_device(unsigned int cpu) { return 0; };
@@ -345,6 +346,7 @@ static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; };
static inline bool amd_mce_is_memory_error(struct mce *m) { return false; };
static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
static inline void smca_extract_err_addr(struct mce *m) { }
+static inline void smca_feature_init(void) { }
#endif

static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); }
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 981d718851a2..ed75d4bd2aff 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -724,9 +724,26 @@ bool amd_mce_is_memory_error(struct mce *m)

void smca_extract_err_addr(struct mce *m)
{
- u8 lsb = (m->addr >> 56) & 0x3f;
+ if (this_cpu_ptr(mce_banks_array)[m->bank].lsb_in_status) {
+ u8 lsb = (m->status >> 24) & 0x3f;

- m->addr &= GENMASK_ULL(55, lsb);
+ m->addr &= GENMASK_ULL(56, lsb);
+ } else {
+ u8 lsb = (m->addr >> 56) & 0x3f;
+
+ m->addr &= GENMASK_ULL(55, lsb);
+ }
+}
+
+void smca_feature_init(void)
+{
+ unsigned int bank;
+ u64 mca_cfg;
+
+ for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
+ rdmsrl(MSR_AMD64_SMCA_MCx_CONFIG(bank), mca_cfg);
+ this_cpu_ptr(mce_banks_array)[bank].lsb_in_status = !!(mca_cfg & BIT(8));
+ }
}

static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
@@ -743,10 +760,6 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
if (m.status & MCI_STATUS_ADDRV) {
m.addr = addr;

- /*
- * Extract [55:<lsb>] where lsb is the least significant
- * *valid* bit of the address bits.
- */
if (mce_flags.smca)
smca_extract_err_addr(&m);
}
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index f031f2668523..92adce850488 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -67,11 +67,7 @@ DEFINE_PER_CPU(unsigned, mce_exception_count);

DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);

-struct mce_bank {
- u64 ctl; /* subevents to enable */
- bool init; /* initialise bank? */
-};
-static DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
+DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);

#define ATTR_LEN 16
/* One object for each MCE bank, shared by all CPUs */
@@ -660,10 +656,6 @@ static noinstr void mce_read_aux(struct mce *m, int i)
m->addr <<= shift;
}

- /*
- * Extract [55:<lsb>] where lsb is the least significant
- * *valid* bit of the address bits.
- */
if (mce_flags.smca)
smca_extract_err_addr(m);
}
@@ -1902,6 +1894,9 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR);
mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA);
mce_flags.amd_threshold = 1;
+
+ if (mce_flags.smca)
+ smca_feature_init();
}
}

diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 52c633950b38..39dc37052cb9 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -175,6 +175,20 @@ struct mce_vendor_flags {

extern struct mce_vendor_flags mce_flags;

+struct mce_bank {
+ u64 ctl; /* subevents to enable */
+ bool init; /* initialise bank? */
+
+ /*
+ * (AMD) MCA_CONFIG[McaLsbInStatusSupported]: This bit indicates
+ * the LSB field is found in MCA_STATUS, when set.
+ */
+ __u64 lsb_in_status : 1,
+ __reserved_1 : 63;
+};
+
+DECLARE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
+
enum mca_msr {
MCA_CTL,
MCA_STATUS,
--
2.17.1